| code (string, length 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, length 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["MobileViTFeatureExtractor"]
UpperCAmelCase__ = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MobileViTForImageClassification",
"MobileViTForSemanticSegmentation",
"MobileViTModel",
"MobileViTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFMobileViTForImageClassification",
"TFMobileViTForSemanticSegmentation",
"TFMobileViTModel",
"TFMobileViTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
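# A minimal, self-contained sketch of the lazy-import pattern used above (illustrative
# helper, not the actual transformers `_LazyModule` implementation): attribute access
# triggers the real submodule import only on first use.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import the owning submodule lazily, then pull the attribute from it.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)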
| 0 |
"""Train a masked language model on TPU."""
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTOTUNE = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu", action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name", type=str, default="local",
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
    )
    parser.add_argument(
        "--tpu_zone", type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16", action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset", type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset", type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length", type=int, default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        num_samples += int(sample_count)
    return num_samples
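# The shard-naming convention assumed by count_samples encodes the number of samples as
# the last dash-separated field, e.g. (hypothetical shard names):
#   count_samples(["train-0-1000.tfrecord", "train-1-842.tfrecord"]) == 1000 + 842 == 1842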
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTOTUNE)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTOTUNE)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=shuffle)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTOTUNE)
    dataset = dataset.prefetch(AUTOTUNE)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size,
        shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size,
        shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
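# Illustration of the special-token masking trick in mask_with_collator above, with toy
# values (assumed ids: 0=CLS, 2=SEP; padding has attention_mask == 0):
#   input_ids      = [[0, 5, 6, 2, 1]]
#   attention_mask = [[1, 1, 1, 1, 0]]
#   special_tokens_mask = ~attention_mask | (input_ids == CLS) | (input_ids == SEP)
#                       = [[True, False, False, True, True]]
# so only positions 1 and 2 remain candidates for MLM masking.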
if __name__ == "__main__":
    args = parse_args()
main(args)
| 63 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True,
        classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 29 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""
    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
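# Typical usage (via the datasets library): load_dataset("audiofolder", data_dir="/path/to/folder")
# picks up files with any of the extensions above and, for classification, infers labels
# from the directory names.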
| 29 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 328 |
def excel_title_to_column(column_title: str) -> int:
    """Convert an Excel-style uppercase column title (e.g. "AB") to its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1
    return answer
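# Worked example: "AB" -> (ord("B") - 64) * 26**0 + (ord("A") - 64) * 26**1 = 2 + 26 = 28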
if __name__ == "__main__":
from doctest import testmod
testmod()
| 328 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[],
                 sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
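# Example usage with the checkpoint referenced above (the exact pieces depend on the
# SentencePiece model, so the output shown here is only indicative):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   tokenizer.tokenize("Hello world")  # -> pieces such as ["▁Hel", "lo", "▁world"]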
| 358 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
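# Example (illustrative column names): AutomaticSpeechRecognition(audio_column="file",
# transcription_column="text") yields column_mapping == {"file": "audio", "text": "transcription"},
# remapping a dataset's columns onto the canonical audio/transcription schema.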
| 124 | 0 |
"""BEiT model configuration"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
        use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False,
        use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1,
        use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True,
        auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
        semantic_loss_ignore_index=255, **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 141 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
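# Example: after dataset.set_format("torch"), integer Arrow columns come back as
# torch.int64 tensors and floating-point columns as torch.float32, matching the
# default dtypes chosen in _tensorize above.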
| 33 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
| 370 |
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(self, include_temb=True, include_res_hidden_states_tuple=False,
                        include_encoder_hidden_states=False, include_skip_sample=False, ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)
        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}
        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)
        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)
        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)
        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)
        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()
        with torch.no_grad():
            output = unet_block(**inputs_dict)
        if isinstance(output, tuple):
            output = output[0]
        self.assertEqual(output.shape, self.output_shape)
        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)
        if isinstance(output, tuple):
            output = output[0]
        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
| 135 | 0 |
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
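# Worked example: volume_conversion(4, "cubicmeter", "litre")
#   = 4 * METRIC_CONVERSION["cubicmeter"].from_ * METRIC_CONVERSION["litre"].to
#   = 4 * 1 * 1000 = 4000.0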
if __name__ == "__main__":
import doctest
doctest.testmod()
| 12 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=10000, encoder_layers=12, encoder_ffn_dim=2048, encoder_attention_heads=4,
        decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=4, encoder_layerdrop=0.0,
        decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="relu",
        d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
        decoder_start_token_id=2, scale_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        max_source_positions=6000, max_target_positions=1024, num_conv_layers=2, conv_kernel_sizes=(5, 5),
        conv_channels=1024, input_feat_per_channel=80, input_channels=1, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`.")
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, **kwargs, )
| 225 | 0 |
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation and join them.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    if (len(data) % 2) != 0:
        raise ValueError(
            """Base16 encoded data is invalid:
Data does not have an even number of hex digits.""")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            """Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.""")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
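# Round-trip example: base16_encode(b"Hello") == "48656C6C6F"
# and base16_decode("48656C6C6F") == b"Hello".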
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353 |
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(self, image, points_per_batch=64, crops_n_layers: int = 0,
                   crop_overlap_ratio: float = 512 / 1500, points_per_crop: Optional[int] = 32,
                   crop_n_points_downscale_factor: Optional[int] = 1, ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor)
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")
        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings
        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None")
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(self, model_inputs, pred_iou_thresh=0.88, stability_score_thresh=0.95,
                 mask_threshold=0, stability_score_offset=1, ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()
        model_outputs = self.model(**model_inputs)
        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False)
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0], iou_scores[0], original_sizes[0], input_boxes[0],
            pred_iou_thresh, stability_score_thresh, mask_threshold, stability_score_offset, )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(self, model_outputs, output_rle_mask=False, output_bboxes_mask=False, crops_nms_thresh=0.7, ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh)
        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)
        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 178 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder,
                 decoder: T5FilmDecoder, scheduler: DDPMScheduler,
                 melgan: OnnxRuntimeModel if is_onnx_available() else Any, ) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder,
            scheduler=scheduler, melgan=melgan, )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits

    @torch.no_grad()
    def __call__(self, input_tokens, generator=None, num_inference_steps=100, return_dict=True,
                 output_type="numpy", callback=None, callback_steps=1, ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}.")
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask, )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device,
                dtype=self.decoder.dtype, )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps, )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info("Generated segment", i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.")
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.")
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
| 29 |
def partition(m: int) -> int:
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
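# Sanity check: partition(3) == 3 (3, 2+1, 1+1+1) and partition(7) == 15, matching the
# integer partition function p(n).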
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__UpperCAmelCase = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 29 | 1 |
def is_ip_v4_address_valid(ip_v4_address: str) -> bool:
    """Return True if the string looks like a valid dotted-quad IPv4 address."""
    octets = [int(i) for i in ip_v4_address.split(".") if i.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 254 for octet in octets)
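# Examples: is_ip_v4_address_valid("192.168.0.23") -> True
#           is_ip_v4_address_valid("192.168.256.1") -> False (octet out of range)
# Note this implementation caps each octet at 254 rather than the usual 255.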
if __name__ == "__main__":
a__ : Optional[Any] = input().strip()
a__ : Optional[Any] = 'valid' if is_ip_va_address_valid(ip) else 'invalid'
print(f'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 243 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 243 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=10 , lowerCamelCase__=3 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=2 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=10 , lowerCamelCase__=0.02 , lowerCamelCase__=0.9 , lowerCamelCase__=None , ):
"""simple docstring"""
__UpperCamelCase : str =parent
__UpperCamelCase : List[str] =batch_size
__UpperCamelCase : Optional[Any] =image_size
__UpperCamelCase : Any =num_channels
__UpperCamelCase : Tuple =patch_size
__UpperCamelCase : List[Any] =tubelet_size
__UpperCamelCase : Dict =num_frames
__UpperCamelCase : Dict =is_training
__UpperCamelCase : Optional[int] =use_labels
__UpperCamelCase : Dict =hidden_size
__UpperCamelCase : Optional[Any] =num_hidden_layers
__UpperCamelCase : List[Any] =num_attention_heads
__UpperCamelCase : Union[str, Any] =intermediate_size
__UpperCamelCase : int =hidden_act
__UpperCamelCase : Dict =hidden_dropout_prob
__UpperCamelCase : Dict =attention_probs_dropout_prob
__UpperCamelCase : str =type_sequence_label_size
__UpperCamelCase : Dict =initializer_range
__UpperCamelCase : str =mask_ratio
__UpperCamelCase : Tuple =scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
__UpperCamelCase : List[Any] =(image_size // patch_size) ** 2
__UpperCamelCase : Any =(num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
__UpperCamelCase : Tuple =int(mask_ratio * self.seq_length )
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()

        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))
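    # Shape sketch for the masking above (editor's illustration, assuming the defaults):
    # `mask` is [1.0] * 22 + [0.0] * 3, expanded to (batch_size, 25) and cast to bool; the
    # decoder then predicts 22 masked patches of 3 * tubelet_size * patch_size**2 = 24 values each.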
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
| 71 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 124 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset)
    else:
        export(model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset)
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 364 |
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Find the smallest negative index of a decreasingly sorted array, via binary search."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
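# Worked examples (editor's addition): find_negative_index([4, 3, 2, -1]) == 3,
# find_negative_index([1, 0, -1, -2]) == 2, and find_negative_index([5, 4, 3]) == 3
# (no negatives, so the array length is returned).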
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """
    An O(m logn) solution that uses binary search in order to find the boundary between
    positive and negative numbers.
    """
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
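# Editor's note: the columns are sorted in decreasing order too, so the first negative index can
# only move left (or stay put) as we scan rows top to bottom; shrinking `bound` this way means
# each row is only searched up to the previous row's boundary.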
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """This solution is O(n^2) because it iterates through every column and row."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """
    Similar to the brute force solution above but uses break in order to reduce the number
    of iterations.
    """
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
def benchmark() -> None:
    """Benchmark our functions next to each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 43 | 0 |
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for position in positions:
        y_test, x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position)

    return permissible_positions
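# Quick illustration (editor's addition): from (0, 0) on a 5x5 board,
# get_valid_pos((0, 0), 5) == [(1, 2), (2, 1)]; the other six knight moves fall off the board.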
def is_complete(board: list[list[int]]) -> bool:
    """Check if the board (matrix) has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem via backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find a solution for the knight tour problem on a board of size n."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 39 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 135 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_pix2struct': [
'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Pix2StructConfig',
'Pix2StructTextConfig',
'Pix2StructVisionConfig',
],
'processing_pix2struct': ['Pix2StructProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Pix2StructPreTrainedModel',
'Pix2StructForConditionalGeneration',
'Pix2StructVisionModel',
'Pix2StructTextModel',
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 205 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt',
'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt',
'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt',
'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt',
'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt',
'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt',
'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt',
'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt',
'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt',
'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
'blocks': 'layers',
'mlp.0': 'fc1',
'mlp.2': 'fc2',
'mlp_ln': 'final_layer_norm',
'.attn.query': '.self_attn.q_proj',
'.attn.key': '.self_attn.k_proj',
'.attn.value': '.self_attn.v_proj',
'.attn_ln': '.self_attn_layer_norm',
'.attn.out': '.self_attn.out_proj',
'.cross_attn.query': '.encoder_attn.q_proj',
'.cross_attn.key': '.encoder_attn.k_proj',
'.cross_attn.value': '.encoder_attn.v_proj',
'.cross_attn_ln': '.encoder_attn_layer_norm',
'.cross_attn.out': '.encoder_attn.out_proj',
'decoder.ln.': 'decoder.layer_norm.',
'encoder.ln.': 'encoder.layer_norm.',
'token_embedding': 'embed_tokens',
'encoder.positional_embedding': 'encoder.embed_positions.weight',
'decoder.positional_embedding': 'decoder.embed_positions.weight',
'ln_post': 'layer_norm',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
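# Editor's note: Whisper ties its LM head to the decoder token embeddings. Rather than copying
# weights, the helper above points the Linear layer's weight at the embedding's underlying tensor
# (`lin_layer.weight.data = emb.weight.data`), so the projection shares storage with the embedding.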
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the `root` default was added during editing so the single-argument call below works;
    # pass an explicit cache directory if you prefer.
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
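# Example invocation (editor's illustration; the script file name is assumed, "tiny" is one of
# the _MODELS keys above):
#     python convert_openai_whisper_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny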
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 205 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
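# Example invocation (editor's illustration with hypothetical paths):
#     python convert_bert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./bert_model.ckpt \
#         --bert_config_file ./bert_config.json \
#         --pytorch_dump_path ./pytorch_model.bin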
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--bert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 46 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 178 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 369 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 1 | 0 |
"""simple docstring"""
import base64


def base64_encode(string: str) -> bytes:
    """Encode the given string as UTF-8 bytes wrapped in Base64."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    """Decode Base64-encoded bytes back into a UTF-8 string."""
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
| 243 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 243 | 1 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
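    # Illustrative usage (editor's addition; assumes a local "data.txt" file exists):
    #     dataset = TextDatasetReader("data.txt").read()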
| 95 |
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )


IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)
            if "ckpt" in self._tf_checkpoint.lower():
                TF_CHECKPOINT = self._tf_checkpoint
                TF_DATASET_FILE = ""
            else:
                TF_DATASET_FILE = self._tf_checkpoint
                TF_CHECKPOINT = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
            )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name)
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output)
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output)
else:
            raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]')
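# A minimal, self-contained sketch of the subcommand pattern the class above plugs into
# (parser wiring only; the `func` callback here is a stand-in for the real command factory):
import argparse

def _demo_factory(args):
    print(f"would convert a {args.model_type} checkpoint from {args.tf_checkpoint}")

root = argparse.ArgumentParser("transformers-cli")
subcommands = root.add_subparsers()
convert = subcommands.add_parser("convert")
convert.add_argument("--model_type", type=str, required=True)
convert.add_argument("--tf_checkpoint", type=str, required=True)
convert.set_defaults(func=_demo_factory)
args = root.parse_args(["convert", "--model_type", "bert", "--tf_checkpoint", "model.ckpt"])
args.func(args)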
| 95 | 1 |
from torch import nn
def __lowerCamelCase ( act_fn ):
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
| 130 | import argparse
import json
from tqdm import tqdm
def main( ):
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--src_path''' , type=str , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , )
    parser.add_argument(
        '''--evaluation_set''' , type=str , help='''where to store parsed evaluation_set file''' , )
    parser.add_argument(
        '''--gold_data_path''' , type=str , help='''where to store parsed gold_data_path file''' , )
    args = parser.parse_args()
    with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open(
        args.gold_data_path , '''w''' ) as gold_file:
        dpr_records = json.load(src_file )
        for dpr_record in tqdm(dpr_records ):
            question = dpr_record['''question''']
            contexts = [context['''title'''] for context in dpr_record['''positive_ctxs''']]
            eval_file.write(question + '''\n''' )
            gold_file.write('''\t'''.join(contexts ) + '''\n''' )
if __name__ == "__main__":
main()
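# Illustrative shape of one DPR record the loop above consumes (field names taken from the
# loop; the concrete values here are made up):
dpr_record = {
    "question": "who wrote the declaration of independence",
    "positive_ctxs": [{"title": "Thomas Jefferson"}, {"title": "United States Declaration of Independence"}],
}
question = dpr_record["question"]
contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
print(question)             # one line of the evaluation_set file
print("\t".join(contexts))  # the matching line of the gold_data_path file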
| 43 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        'num_images_per_prompt',
        'latents',
        'callback',
        'callback_steps',
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components ( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        unet = UNet2DModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        scheduler = DDIMScheduler()
        components = {'unet': unet, 'scheduler': scheduler}
        return components
    def get_dummy_inputs ( self , device , seed=0 ):
        '''simple docstring'''
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    def test_inference ( self ):
        '''simple docstring'''
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 32, 32, 3) )
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1e-3 )
    def test_dict_tuple_outputs_equivalent ( self ):
        '''simple docstring'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
    def test_save_load_local ( self ):
        '''simple docstring'''
        super().test_save_load_local(expected_max_difference=3e-3 )
    def test_save_load_optional_components ( self ):
        '''simple docstring'''
        super().test_save_load_optional_components(expected_max_difference=3e-3 )
    def test_inference_batch_single_identical ( self ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests ( unittest.TestCase ):
    def test_inference_cifar10 ( self ):
        '''simple docstring'''
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddim.to(torch_device )
        ddim.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddim(generator=generator , eta=0.0 , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_inference_ema_bedroom ( self ):
        '''simple docstring'''
        model_id = 'google/ddpm-ema-bedroom-256'
        unet = UNet2DModel.from_pretrained(model_id )
        scheduler = DDIMScheduler.from_pretrained(model_id )
        ddpm = DDIMPipeline(unet=unet , scheduler=scheduler )
        ddpm.to(torch_device )
        ddpm.set_progress_bar_config(disable=None )
        generator = torch.manual_seed(0 )
        image = ddpm(generator=generator , output_type='numpy' ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
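# The slow test above, reduced to a minimal sampling sketch (same model id; downloads weights):
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel

unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
image = pipe(generator=torch.manual_seed(0), eta=0.0, output_type="numpy").images[0]
print(image.shape)  # (32, 32, 3)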
| 114 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint , config ):
    """simple docstring"""
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['encoder.conv_in.weight'] = vae_state_dict['encoder.conv_in.weight']
    new_checkpoint['encoder.conv_in.bias'] = vae_state_dict['encoder.conv_in.bias']
    new_checkpoint['encoder.conv_out.weight'] = vae_state_dict['encoder.conv_out.weight']
    new_checkpoint['encoder.conv_out.bias'] = vae_state_dict['encoder.conv_out.bias']
    new_checkpoint['encoder.conv_norm_out.weight'] = vae_state_dict['encoder.norm_out.weight']
    new_checkpoint['encoder.conv_norm_out.bias'] = vae_state_dict['encoder.norm_out.bias']
    new_checkpoint['decoder.conv_in.weight'] = vae_state_dict['decoder.conv_in.weight']
    new_checkpoint['decoder.conv_in.bias'] = vae_state_dict['decoder.conv_in.bias']
    new_checkpoint['decoder.conv_out.weight'] = vae_state_dict['decoder.conv_out.weight']
    new_checkpoint['decoder.conv_out.bias'] = vae_state_dict['decoder.conv_out.bias']
    new_checkpoint['decoder.conv_norm_out.weight'] = vae_state_dict['decoder.norm_out.weight']
    new_checkpoint['decoder.conv_norm_out.bias'] = vae_state_dict['decoder.norm_out.bias']
    new_checkpoint['quant_conv.weight'] = vae_state_dict['quant_conv.weight']
    new_checkpoint['quant_conv.bias'] = vae_state_dict['quant_conv.bias']
    new_checkpoint['post_quant_conv.weight'] = vae_state_dict['post_quant_conv.weight']
    new_checkpoint['post_quant_conv.bias'] = vae_state_dict['post_quant_conv.bias']
    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'encoder.down' in layer} )
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks )
    }
    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({'.'.join(layer.split('.' )[:3] ) for layer in vae_state_dict if 'decoder.up' in layer} )
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks )
    }
    for i in range(num_down_blocks ):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight" )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias" )
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': f"down.{i}.block", 'new': f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if 'encoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if 'encoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    for i in range(num_up_blocks ):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': f"up.{block_id}.block", 'new': f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_resnets = [key for key in vae_state_dict if 'decoder.mid.block' in key]
    num_mid_res_blocks = 2
    for i in range(1 , num_mid_res_blocks + 1 ):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets )
        meta_path = {'old': f"mid.block_{i}", 'new': f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    mid_attentions = [key for key in vae_state_dict if 'decoder.mid.attn' in key]
    paths = renew_vae_attention_paths(mid_attentions )
    meta_path = {'old': 'mid.attn_1', 'new': 'mid_block.attentions.0'}
    assign_to_checkpoint(paths , new_checkpoint , vae_state_dict , additional_replacements=[meta_path] , config=config )
    conv_attn_to_linear(new_checkpoint )
    return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path : str , output_path : str , ):
    """simple docstring"""
    # Only support V1
    r = requests.get(
        ' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 5_12
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if checkpoint_path.endswith('safetensors' ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path , framework='pt' , device='cpu' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path , map_location=device )['state_dict']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config , image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint , vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
lowerCamelCase : List[str] = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
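# The conversion above is, at its core, prefix-driven key renaming; a toy version of the idea
# (keys and mapping are illustrative, not the real VAE layout):
old_sd = {"encoder.down.0.block.0.conv1.weight": 1, "encoder.down.0.block.1.conv1.weight": 2}
mapping = {"down.0.block": "down_blocks.0.resnets"}
new_sd = {}
for key, value in old_sd.items():
    for old, new in mapping.items():
        key = key.replace(old, new)
    new_sd[key] = value
print(sorted(new_sd))  # keys now follow the diffusers naming scheme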
| 114 | 1 |
def mf_knapsack( i : int , wt : list , val : list , j : int ) -> int:
    """simple docstring"""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            best = mf_knapsack(i - 1 , wt , val , j )
        else:
            best = max(
                mf_knapsack(i - 1 , wt , val , j ) , mf_knapsack(i - 1 , wt , val , j - wt[i - 1] ) + val[i - 1] , )
        f[i][j] = best
    return f[i][j]
def knapsack( w : int , wt : list , val : list , n : int ):
    """simple docstring"""
    dp = [[0] * (w + 1) for _ in range(n + 1 )]
    for i in range(1 , n + 1 ):
        for w_ in range(1 , w + 1 ):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w], dp
def knapsack_with_example_solution( w : int , wt : list , val : list ):
    """simple docstring"""
    if not (isinstance(wt , (list, tuple) ) and isinstance(val , (list, tuple) )):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples' )
    num_items = len(wt )
    if num_items != len(val ):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            F'''But got {num_items} weights and {len(val )} values'''
        )
        raise ValueError(msg )
    for i in range(num_items ):
        if not isinstance(wt[i] , int ):
            msg = (
                'All weights must be integers but got weight of '
                F'''type {type(wt[i] )} at index {i}'''
            )
            raise TypeError(msg )
    optimal_val , dp_table = knapsack(w , wt , val , num_items )
    example_optional_set: set = set()
    _construct_solution(dp_table , wt , num_items , w , example_optional_set )
    return optimal_val, example_optional_set
def _construct_solution( dp : list , wt : list , i : int , j : int , optimal_set : set ):
    """simple docstring"""
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp , wt , i - 1 , j , optimal_set )
        else:
            optimal_set.add(i )
            _construct_solution(dp , wt , i - 1 , j - wt[i - 1] , optimal_set )
if __name__ == "__main__":
lowercase_ = [3, 2, 4, 4]
lowercase_ = [4, 3, 2, 3]
lowercase_ = 4
lowercase_ = 6
lowercase_ = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowercase_ , lowercase_ = knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowercase_ , lowercase_ = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
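# Brute-force cross-check of the DP answer above, on the same capacity, weights and values
# (self-contained so it does not depend on the names under the main guard):
from itertools import combinations

w_cap, weights, values = 6, [4, 3, 2, 3], [3, 2, 4, 4]
best = max(
    sum(values[i] for i in subset)
    for r in range(len(weights) + 1)
    for subset in combinations(range(len(weights)), r)
    if sum(weights[i] for i in subset) <= w_cap
)
assert best == 8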
| 205 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm( main_process_only : bool = True , *args , **kwargs ):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' )
    disable = False
    if main_process_only:
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
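# Usage sketch (assumes the corrected wrapper above): the first positional argument is the
# main_process_only flag, so the iterable comes second; on a multi-process run only local
# rank 0 renders the bar, while on a single process this behaves like plain tqdm.
for _ in tqdm(True, range(3)):
    pass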
| 205 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class TvltProcessorTest ( unittest.TestCase):
    def setUp ( self ):
        '''simple docstring'''
        self.checkpoint = 'ZinengTang/tvlt-base'
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor ( self , **kwargs ):
        '''simple docstring'''
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor ( self , **kwargs ):
        '''simple docstring'''
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown ( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default ( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor ( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        audio_dict = feature_extractor(audio , return_tensors='np' )
        input_processor = processor(audio=audio , return_tensors='np' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_image_processor ( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images , return_tensors='np' )
        input_processor = processor(images=images , return_tensors='np' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_processor ( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([12000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['audio_values', 'audio_mask', 'pixel_values', 'pixel_mask'] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names ( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='`processor` and `image_processor`+`feature_extractor` model input names do not match' , )
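# The joint audio+image path exercised above, as a standalone sketch (same checkpoint as the
# test class; downloads the preprocessor configs):
import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

processor = TvltProcessor(
    image_processor=TvltImageProcessor.from_pretrained("ZinengTang/tvlt-base"),
    feature_extractor=TvltFeatureExtractor.from_pretrained("ZinengTang/tvlt-base"),
)
inputs = processor(audio=np.ones([12000]), images=np.ones([3, 224, 224]))
print(sorted(inputs.keys()))  # ['audio_mask', 'audio_values', 'pixel_mask', 'pixel_values']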
| 370 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
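# The same guarded-import idiom in miniature (the dependency name here is hypothetical):
try:
    import some_optional_dependency  # noqa: F401
except ImportError:
    HAS_OPTIONAL_DEPENDENCY = False
else:
    HAS_OPTIONAL_DEPENDENCY = True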
| 251 | 0 |
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset ( IterableDataset ):
    def __init__( self , p_stop=0.01 , max_length=10_00 ):
        """simple docstring"""
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        """simple docstring"""
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester ( unittest.TestCase ):
    def check_batch_sampler_shards ( self , batch_sampler , expected , split_batches=False , even_batches=True ):
        """simple docstring"""
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler , 2 , i , split_batches=split_batches , even_batches=even_batches )
            for i in range(2 )
        ]
        batch_sampler_lists = [list(batch_sampler_shard ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard ) for shard in batch_sampler_shards] , [len(e ) for e in expected] )
        self.assertListEqual(batch_sampler_lists , expected )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
# Check the shards when the dataset is very small.
_snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
_snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
_snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(24 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(21 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(22 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(20 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
_snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(2 ) , batch_size=3 , drop_last=lowerCAmelCase_ )
_snake_case = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(24 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
# Expected shouldn't change
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size.
_snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(22 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(21 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
# Check the shards when the dataset is very small.
_snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [[[0, 1]], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
_snake_case = BatchSampler(range(2 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = [[], []]
self.check_batch_sampler_shards(lowerCAmelCase_ , lowerCAmelCase_ , split_batches=lowerCAmelCase_ , even_batches=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_snake_case = [BatchSamplerShard(lowerCAmelCase_ , 2 , lowerCAmelCase_ , even_batches=lowerCAmelCase_ ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=2 , lowerCAmelCase_=False ):
"""simple docstring"""
random.seed(lowerCAmelCase_ )
_snake_case = list(lowerCAmelCase_ )
_snake_case = [
IterableDatasetShard(
lowerCAmelCase_ , batch_size=lowerCAmelCase_ , drop_last=lowerCAmelCase_ , num_processes=lowerCAmelCase_ , process_index=lowerCAmelCase_ , split_batches=lowerCAmelCase_ , )
for i in range(lowerCAmelCase_ )
]
_snake_case = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(lowerCAmelCase_ )
iterable_dataset_lists.append(list(lowerCAmelCase_ ) )
_snake_case = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_snake_case = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(lowerCAmelCase_ ) , len(lowerCAmelCase_ ) )
self.assertTrue(len(lowerCAmelCase_ ) % shard_batch_size == 0 )
_snake_case = []
for idx in range(0 , len(lowerCAmelCase_ ) , lowerCAmelCase_ ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(lowerCAmelCase_ ) < len(lowerCAmelCase_ ):
reference += reference
self.assertListEqual(lowerCAmelCase_ , reference[: len(lowerCAmelCase_ )] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = 42
_snake_case = RandomIterableDataset()
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
# Edge case with a very small dataset
_snake_case = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
self.check_iterable_dataset_shards(lowerCAmelCase_ , lowerCAmelCase_ , batch_size=4 , drop_last=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = BatchSampler(range(16 ) , batch_size=4 , drop_last=lowerCAmelCase_ )
_snake_case = SkipBatchSampler(lowerCAmelCase_ , 2 )
self.assertListEqual(list(lowerCAmelCase_ ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = DataLoader(list(range(16 ) ) , batch_size=4 )
_snake_case = skip_first_batches(lowerCAmelCase_ , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def lowerCamelCase ( self ):
"""simple docstring"""
Accelerator()
_snake_case = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(lowerCAmelCase_ ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
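# What the shard tests above assert, in miniature: with even batches and no batch splitting,
# two processes take batches round-robin, so process i sees batches i, i+2, i+4, ...
batches = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, 10, 11]]
shards = [batches[i::2] for i in range(2)]
print(shards[0])  # [[0, 1, 2], [6, 7, 8]]
print(shards[1])  # [[3, 4, 5], [9, 10, 11]]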
| 42 | '''simple docstring'''
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed ( TransformedDistribution ):
    def __init__(self , base_distribution : Distribution , loc=None , scale=None , event_dim=0 ):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=event_dim )] )
    @property
    def mean (self ):
        return self.base_dist.mean * self.scale + self.loc
    @property
    def variance (self ):
        return self.base_dist.variance * self.scale**2
    @property
    def stddev (self ):
        return self.variance.sqrt()
class ParameterProjection ( nn.Module ):
    def __init__(self , in_features : int , args_dim : Dict[str, int] , domain_map : Callable[..., Tuple[torch.Tensor]] , **kwargs ):
        super().__init__(**kwargs )
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features , dim ) for dim in args_dim.values()] )
        self.domain_map = domain_map
    def forward (self , x : torch.Tensor ):
        params_unbounded = [proj(x ) for proj in self.proj]
        return self.domain_map(*params_unbounded )
class LambdaLayer ( nn.Module ):
    def __init__(self , function ):
        super().__init__()
        self.function = function
    def forward (self , x , *args ):
        return self.function(x , *args )
class DistributionOutput :
    distribution_class : type
    in_features : int
    args_dim : Dict[str, int]
    def __init__(self , dim : int = 1 ):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}
    def _base_distribution (self , distr_args ):
        if self.dim == 1:
            return self.distribution_class(*distr_args )
        else:
            return Independent(self.distribution_class(*distr_args ) , 1 )
    def distribution (self , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None , ) -> Distribution:
        distr = self._base_distribution(distr_args )
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr , loc=loc , scale=scale , event_dim=self.event_dim )
    @property
    def event_shape (self ):
        return () if self.dim == 1 else (self.dim,)
    @property
    def event_dim (self ) -> int:
        return len(self.event_shape )
    @property
    def value_in_support (self ) -> float:
        return 0.0
    def get_parameter_projection (self , in_features : int ) -> nn.Module:
        return ParameterProjection(
            in_features=in_features , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
    def domain_map (self , *args : torch.Tensor ):
        raise NotImplementedError()
    @staticmethod
    def squareplus (x : torch.Tensor ) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x ) + 4.0 )) / 2.0
class StudentTOutput ( DistributionOutput ):
    args_dim : Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class : type = StudentT
    @classmethod
    def domain_map (cls , df : torch.Tensor , loc : torch.Tensor , scale : torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        df = 2.0 + cls.squareplus(df )
        return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class NormalOutput ( DistributionOutput ):
    args_dim : Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class : type = Normal
    @classmethod
    def domain_map (cls , loc : torch.Tensor , scale : torch.Tensor ):
        scale = cls.squareplus(scale ).clamp_min(torch.finfo(scale.dtype ).eps )
        return loc.squeeze(-1 ), scale.squeeze(-1 )
class NegativeBinomialOutput ( DistributionOutput ):
    args_dim : Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class : type = NegativeBinomial
    @classmethod
    def domain_map (cls , total_count : torch.Tensor , logits : torch.Tensor ):
        total_count = cls.squareplus(total_count )
        return total_count.squeeze(-1 ), logits.squeeze(-1 )
    def _base_distribution (self , distr_args ):
        total_count , logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count , logits=logits )
        else:
            return Independent(self.distribution_class(total_count=total_count , logits=logits ) , 1 )
    def distribution (self , distr_args , loc : Optional[torch.Tensor] = None , scale : Optional[torch.Tensor] = None ) -> Distribution:
        total_count , logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits) )
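# End-to-end sketch of a distribution head (univariate Student-t, batch of 2; relies on the
# corrected class names above):
head = StudentTOutput()
projection = head.get_parameter_projection(in_features=8)
distr_args = projection(torch.randn(2, 8))
distribution = head.distribution(distr_args, loc=torch.zeros(2), scale=torch.ones(2))
print(distribution.sample().shape)  # torch.Size([2])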
| 1 | 0 |
'''simple docstring'''
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch( xlm_checkpoint_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    chkpt = torch.load(xlm_checkpoint_path , map_location="cpu" )
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v , (torch.FloatTensor, numpy.ndarray) )}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
    torch.save(two_levels_state_dict , pytorch_weights_dump_path )
    print(F'Save configuration file to {pytorch_config_dump_path}' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(config , indent=2 ) + "\n" )
    print(F'Save vocab file to {pytorch_vocab_dump_path}' )
    with open(pytorch_vocab_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(json.dumps(vocab , indent=2 ) + "\n" )
if __name__ == "__main__":
__lowercase: List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowercase: Dict = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path) | 361 |
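# The vocab re-keying above on a toy XLM vocabulary: indices <= 13 are specials, "@@" marks a
# word-internal subword, and everything else gets the "</w>" end-of-word suffix:
vocab = {"<s>": 0, "hel@@": 20, "lo": 21}
converted = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
print(converted)  # {'<s>': 0, 'hel': 20, 'lo</w>': 21}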
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status( target_runners , token ):
    '''simple docstring'''
    offline_runners = []
    cmd = (
        F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd , shell=True , stdout=subprocess.PIPE )
    o = output.stdout.decode("utf-8" )
    status = json.loads(o )
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open("offline_runners.txt" , "w" ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = "\n".join([x["name"] for x in offline_runners] )
        raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
    def list_str( values ):
        '''simple docstring'''
        return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token) | 31 | 0 |
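# Shape of the GitHub API payload the script above filters (only the fields the loop reads;
# the runner names are made up):
status = {"runners": [{"name": "gpu-ci-1", "status": "online"}, {"name": "gpu-ci-2", "status": "offline"}]}
target_runners = ["gpu-ci-1", "gpu-ci-2"]
offline = [r for r in status["runners"] if r["name"] in target_runners and r["status"] == "offline"]
print([r["name"] for r in offline])  # ['gpu-ci-2']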
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort( graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph , neighbour , visited )
    order.append(vert )
    return order
def find_components( reversed_graph : dict[int, list[int]] , vert : int , visited : list[bool] ):
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph , neighbour , visited )
    return component
def strongly_connected_components( graph : dict[int, list[int]] ):
    """simple docstring"""
    visited = len(graph ) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph ) )}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert )
    order = []
    for i, was_visited in enumerate(visited ):
        if not was_visited:
            order += topology_sort(graph , i , visited )
    components_list = []
    visited = len(graph ) * [False]
    for i in range(len(graph ) ):
        vert = order[len(graph ) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph , vert , visited )
            components_list.append(component )
    return components_list
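# Sanity check on the first test graph: 0 -> 2 -> 1 -> 0 is the only cycle, so the strongly
# connected components are {0, 1, 2}, {3} and {4} (assumes the corrected functions above):
components = strongly_connected_components(test_graph_1)
assert sorted(sorted(c) for c in components) == [[0, 1, 2], [3], [4]]
print(components)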
| 95 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
UpperCAmelCase : int = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    """simple docstring"""
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f'''Building PyTorch XLNetForSequenceClassification model from configuration: {config}''' )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(f'''Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f'''Save configuration file to {os.path.abspath(pytorch_config_dump_path )}''' )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
UpperCAmelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
UpperCAmelCase : int = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 95 | 1 |
"""simple docstring"""
def binary_xor( a: int , b: int ) -> str:
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
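# Worked example: 25 = 0b11001 and 32 = 0b100000; zero-padded to the same width they XOR
# bitwise to 0b111001 (uses the corrected function name above):
print(binary_xor(25, 32))  # 0b111001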
| 168 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline ( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , speech_model , speech_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
def _lowerCAmelCase ( self , _a = "auto" ):
"""simple docstring"""
if slice_size == "auto":
lowerCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.enable_attention_slicing(_a )
@torch.no_grad()
    def __call__( self , audio , sampling_rate=16_000 , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        """simple docstring"""
        inputs = self.speech_processor.feature_extractor(
            audio , return_tensors="""pt""" , sampling_rate=sampling_rate ).input_features.to(self.device )
        predicted_ids = self.speech_model.generate(inputs , max_length=480_000 )
        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids , skip_special_tokens=True , normalize=True )[
            0
        ]
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt )}' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps )}.' )
# get prompt text embeddings
lowerCamelCase = self.tokenizer(
_a , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
lowerCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowerCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
lowerCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
lowerCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowerCamelCase , lowerCamelCase , lowerCamelCase = text_embeddings.shape
lowerCamelCase = text_embeddings.repeat(1 , _a , 1 )
lowerCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , _a , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase = 42
if negative_prompt is None:
lowerCamelCase = [""""""] * batch_size
elif type(_a ) is not type(_a ):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(_a )} !='
f' {type(_a )}.' )
elif isinstance(_a , _a ):
lowerCamelCase = [negative_prompt]
elif batch_size != len(_a ):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(_a )}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
""" the batch size of `prompt`.""" )
else:
lowerCamelCase = negative_prompt
lowerCamelCase = text_input_ids.shape[-1]
lowerCamelCase = self.tokenizer(
_a , padding="""max_length""" , max_length=_a , truncation=_a , return_tensors="""pt""" , )
lowerCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase = uncond_embeddings.shape[1]
lowerCamelCase = uncond_embeddings.repeat(1 , _a , 1 )
lowerCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , _a , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowerCamelCase = torch.randn(_a , generator=_a , device="""cpu""" , dtype=_a ).to(
self.device )
else:
lowerCamelCase = torch.randn(_a , generator=_a , device=self.device , dtype=_a )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowerCamelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCamelCase = {}
if accepts_eta:
lowerCamelCase = eta
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
lowerCamelCase = self.unet(_a , _a , encoder_hidden_states=_a ).sample
# perform guidance
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase = noise_pred.chunk(2 )
lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_a , _a , _a )
lowerCamelCase = 1 / 0.18_215 * latents
lowerCamelCase = self.vae.decode(_a ).sample
lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase = self.numpy_to_pil(_a )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=_a , nsfw_content_detected=_a )
| 168 | 1 |
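# Illustrative sketch (not part of the original file): the guidance step above combines the
# unconditional and text-conditioned noise predictions. The tensors and the scale below are
# invented for demonstration; only the arithmetic mirrors the pipeline's guidance line.
import torch

noise_pred_uncond = torch.zeros(1, 4, 64, 64)
noise_pred_text = torch.ones(1, 4, 64, 64)
guidance_scale = 7.5
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert torch.allclose(noise_pred, torch.full((1, 4, 64, 64), 7.5))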
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    # standard sieve of Eratosthenes, iterating only over the odd numbers
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    # Project Euler 50: find the prime below `ceiling` that is the sum of the
    # longest run of consecutive primes
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"""{solution() = }""")
| 114 |
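# Quick sanity check of the helpers above (illustrative; values verified by hand):
assert prime_sieve(50) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
# 41 = 2 + 3 + 5 + 7 + 11 + 13 is the longest consecutive-prime sum below 100 that is itself prime
assert solution(100) == 41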
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        # the sensitivity factor k is conventionally chosen as 0.04 or 0.06
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str):
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        # use the k supplied at construction time (the source hardcoded 0.04 here,
        # which silently ignored self.k)
        k = self.k
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wyy = iyy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                wxy = ixy[
                    y - offset : y + offset + 1, x - offset : x + offset + 1
                ].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 114 | 1 |
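# For reference, the inner loop computes the Harris response R = det(M) - k * trace(M)^2
# for the 2x2 structure tensor M of each window. Standalone check with made-up window sums:
import numpy as np

wxx, wyy, wxy = 4.0, 3.0, 1.0  # illustrative values only
k = 0.04
m = np.array([[wxx, wxy], [wxy, wyy]])
r = np.linalg.det(m) - k * np.trace(m) ** 2
assert abs(r - ((wxx * wyy - wxy**2) - k * (wxx + wyy) ** 2)) < 1e-9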
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
    "EAGER",
    "AOT_EAGER",
    "INDUCTOR",
    "NVFUSER",
    "AOT_NVFUSER",
    "AOT_CUDAGRAPHS",
    "OFI",
    "FX2TRT",
    "ONNXRT",
    "IPEX",
]


def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default_choice=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default_choice)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]


class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """A custom formatter that hides the fixed "<command> [<args>]" part of the usage line."""

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
| 265 |
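# A hedged usage sketch of `_ask_field` as defined above; the prompt text, default,
# and error message here are invented for illustration:
num_gpus = _ask_field(
    "How many GPUs should be used? [1]: ",
    convert_value=int,
    default=1,
    error_message="Please enter an integer.",
)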
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
| 265 | 1 |
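# Minimal sketch of instantiating the configuration above with a non-default
# frame count (values are illustrative, not from the source):
config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")
assert config.num_frames == 16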
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 70 |
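# Note that `num_hidden_layers` ends up as a per-modality dict rather than an int.
# Quick illustrative check:
config = LxmertConfig(l_layers=9, x_layers=5, r_layers=5)
assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}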
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )


def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task="image-classification", use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder", data_files=data_files, cache_dir=model_args.cache_dir, task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="image-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)
    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 251 | 0 |
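# Illustrative invocation of the script above; the dataset name and output path are
# placeholders, and the batch-size/epoch flags come from standard TrainingArguments:
# python run_image_classification.py \
#     --dataset_name beans \
#     --output_dir ./vit-beans \
#     --do_train --do_eval \
#     --per_device_train_batch_size 8 \
#     --num_train_epochs 3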
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        estimator = self.create_estimator()
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 249 |
'''simple docstring'''
def apply_table(inp, table):
    """Apply a 1-indexed permutation table to a bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Rotate a bit-string one position to the left."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise xor of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    # the first and last bits select the row, the middle two bits the column
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right


if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decrypting is:", PT)
| 249 | 1 |
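# Worked example of the permutation helper above: table entries are 1-indexed positions
# into the input string, so positions 2, 4, 3, 1 of "0101" yield '1', '1', '0', '0'.
assert apply_table("0101", [2, 4, 3, 1]) == "1100"
assert left_shift("10100") == "01001"
assert xor("1010", "0110") == "1100"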
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
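# Outside the test harness, the streaming API exercised above is used roughly like this
# (model and prompt are placeholders; TextStreamer prints tokens to stdout as they arrive):
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
model = AutoModelForCausalLM.from_pretrained("distilgpt2")
inputs = tokenizer("Hello", return_tensors="pt")
streamer = TextStreamer(tokenizer, skip_prompt=True)
model.generate(**inputs, max_new_tokens=20, streamer=streamer)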
| 33 |
'''simple docstring'''
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")
if __name__ == "__main__":
from doctest import testmod
testmod()
| 31 | 0 |
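# End-to-end check of `viterbi` using the classic healthy/fever HMM; the expected
# path is the textbook result for this example, included purely as an illustration:
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial = {"Healthy": 0.6, "Fever": 0.4}
transition = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
assert viterbi(observations, states, initial, transition, emission) == ["Healthy", "Healthy", "Fever"]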
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list), total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 306 |
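# Quick check of the token-level Jaccard similarity defined above: the tokens of
# "a = 1" are {"a", "1"} and of "a = 2" are {"a", "2"}, so the similarity is 1/3.
assert abs(jaccard_similarity("a = 1", "a = 2") - 1 / 3) < 1e-9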
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_poolformer': [
        'POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'PoolFormerConfig',
        'PoolFormerOnnxConfig',
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_poolformer'] = ['PoolFormerFeatureExtractor']
    _import_structure['image_processing_poolformer'] = ['PoolFormerImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_poolformer'] = [
        'POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PoolFormerForImageClassification',
        'PoolFormerModel',
        'PoolFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 306 | 1 |
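# The `_LazyModule` installed above defers the heavy torch/vision imports until an
# attribute is first accessed; an illustrative (assumed) usage of the resulting module:
from transformers.models import poolformer

config_cls = poolformer.PoolFormerConfig  # this first access triggers the real import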
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]


class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1), nn.ReLU(), nn.Conv2d(reduced_channels, in_channels, kernel_size=1), nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # b c h w -> b c 1 1
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act), RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act), RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))), RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config, in_channels, out_channels, stride=stride,
            ), *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        # `RegNetModel` is defined further down in the original file
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value


REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , _SCREAMING_SNAKE_CASE , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , __magic_name__ ) -> str:
super().__init__(__magic_name__ )
_a = config
_a = RegNetEmbeddings(__magic_name__ )
_a = RegNetEncoder(__magic_name__ )
_a = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ = None , __magic_name__ = None ) -> BaseModelOutputWithPoolingAndNoAttention:
_a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_a = return_dict if return_dict is not None else self.config.use_return_dict
_a = self.embedder(__magic_name__ )
_a = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ )
_a = encoder_outputs[0]
_a = self.pooler(__magic_name__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
""" , _SCREAMING_SNAKE_CASE , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class a ( _SCREAMING_SNAKE_CASE ):
def __init__( self , __magic_name__ ) -> Dict:
super().__init__(__magic_name__ )
_a = config.num_labels
_a = RegNetModel(__magic_name__ )
# classification head
_a = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __UpperCAmelCase ( self , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , ) -> ImageClassifierOutputWithNoAttention:
_a = return_dict if return_dict is not None else self.config.use_return_dict
_a = self.regnet(__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ )
_a = outputs.pooler_output if return_dict else outputs[1]
_a = self.classifier(__magic_name__ )
_a = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_a = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_a = 'single_label_classification'
else:
_a = 'multi_label_classification'
if self.config.problem_type == "regression":
_a = MSELoss()
if self.num_labels == 1:
_a = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_a = loss_fct(__magic_name__ , __magic_name__ )
elif self.config.problem_type == "single_label_classification":
_a = CrossEntropyLoss()
_a = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_a = BCEWithLogitsLoss()
_a = loss_fct(__magic_name__ , __magic_name__ )
if not return_dict:
_a = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 168 |
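The RegNet Y-layer above gates its bottleneck output with a squeeze-and-excitation block. Below is a minimal, runnable sketch of that gating step, written with the standard PyTorch names (nn.Conv2d, nn.AdaptiveAvgPool2d) that the mangled identifiers in this snippet stand in for; the channel sizes are illustrative assumptions, not values from any real checkpoint:

import torch
from torch import nn

class SqueezeExcitation(nn.Module):
    def __init__(self, channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))  # b c h w -> b c 1 1
        self.attention = nn.Sequential(
            nn.Conv2d(channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state: torch.Tensor) -> torch.Tensor:
        attention = self.attention(self.pooler(hidden_state))  # per-channel weights in (0, 1)
        return hidden_state * attention  # channel-wise reweighting, shape unchanged

se = SqueezeExcitation(channels=64, reduced_channels=16)
print(se(torch.randn(2, 64, 8, 8)).shape)  # torch.Size([2, 64, 8, 8])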
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
a_ : str = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
a_ : Tuple = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
a_ : Any = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
a_ : Union[str, Any] = sorted(arg_to_scheduler.keys())
a_ : List[Any] = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class a ( pl.LightningModule ):
def __init__( self , __magic_name__ , __magic_name__=None , __magic_name__="base" , __magic_name__=None , __magic_name__=None , __magic_name__=None , **__magic_name__ , ) -> List[str]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(__magic_name__ )
_a = 0
_a = Path(self.hparams.output_dir )
_a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
_a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=__magic_name__ , **__magic_name__ , )
else:
_a = config
_a = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , __magic_name__ , __magic_name__ ):
assert hasattr(self.config , __magic_name__ ), f'model config doesn\'t have a `{p}` attribute'
setattr(self.config , __magic_name__ , getattr(self.hparams , __magic_name__ ) )
if tokenizer is None:
_a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=__magic_name__ , )
else:
_a = tokenizer
_a = MODEL_MODES[mode]
if model is None:
_a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=__magic_name__ , )
else:
_a = model
def __UpperCAmelCase ( self , *__magic_name__ , **__magic_name__ ) -> List[Any]:
_a = self.model_type.from_pretrained(*__magic_name__ , **__magic_name__ )
def __UpperCAmelCase ( self ) -> List[str]:
_a = arg_to_scheduler[self.hparams.lr_scheduler]
_a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
_a = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __UpperCAmelCase ( self ) -> Any:
_a = self.model
_a = ['bias', 'LayerNorm.weight']
_a = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
_a = Adafactor(
__magic_name__ , lr=self.hparams.learning_rate , scale_parameter=__magic_name__ , relative_step=__magic_name__ )
else:
_a = AdamW(
__magic_name__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
_a = optimizer
_a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> List[str]:
return self.validation_step(__magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[int]:
return self.validation_end(__magic_name__ )
def __UpperCAmelCase ( self ) -> int:
_a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
_a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __UpperCAmelCase ( self , __magic_name__ ) -> Optional[int]:
if stage == "test":
_a = len(self.test_dataloader().dataset )
else:
_a = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=__magic_name__ )
_a = len(self.train_dataloader().dataset )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ = False ) -> int:
raise NotImplementedError('You must implement this for your task' )
def __UpperCAmelCase ( self ) -> Tuple:
return self.train_loader
def __UpperCAmelCase ( self ) -> Dict:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=__magic_name__ )
def __UpperCAmelCase ( self ) -> Tuple:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ ) -> List[Any]:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
__magic_name__ , list(filter(__magic_name__ , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __UpperCAmelCase ( self , __magic_name__ ) -> None:
_a = self.output_dir.joinpath('best_tfmr' )
_a = self.step_count
self.model.save_pretrained(__magic_name__ )
self.tokenizer.save_pretrained(__magic_name__ )
@staticmethod
def __UpperCAmelCase ( __magic_name__ , __magic_name__ ) -> Optional[int]:
parser.add_argument(
'--model_name_or_path' , default=__magic_name__ , type=__magic_name__ , required=__magic_name__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=__magic_name__ , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=__magic_name__ , type=__magic_name__ , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(__magic_name__ ).parent / 'test_run' / 'cache' ) , type=__magic_name__ , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=__magic_name__ , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=__magic_name__ , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=__magic_name__ , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=__magic_name__ , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5e-5 , type=__magic_name__ , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=__magic_name__ , metavar=__magic_name__ , type=__magic_name__ , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=__magic_name__ , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=__magic_name__ , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=__magic_name__ , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=__magic_name__ , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=__magic_name__ )
parser.add_argument('--train_batch_size' , default=32 , type=__magic_name__ )
parser.add_argument('--eval_batch_size' , default=32 , type=__magic_name__ )
parser.add_argument('--adafactor' , action='store_true' )
class a ( pl.Callback ):
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> int:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In newer pytorch-lightning versions, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class a ( pl.Callback ):
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Any:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(__magic_name__ )
class a ( pl.Callback ):
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = trainer.lr_schedulers[0]['scheduler']
_a = {f'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(__magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> int:
rank_zero_info('***** Validation results *****' )
_a = trainer.callback_metrics
# Log results
for key in sorted(__magic_name__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__magic_name__ , str(metrics[key] ) ) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
rank_zero_info('***** Test results *****' )
_a = trainer.callback_metrics
# Log and save results to file
_a = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(__magic_name__ , 'w' ) as writer:
for key in sorted(__magic_name__ ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(__magic_name__ , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(__magic_name__ , str(metrics[key] ) ) )
def _A (lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :List[Any] ) -> None:
'''simple docstring'''
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase__ ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase__ , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase__ )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase__ , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase__ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase__ , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase__ ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase__ , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def _A (lowerCAmelCase__ :BaseTransformer , lowerCAmelCase__ :argparse.Namespace , lowerCAmelCase__ :Tuple=None , lowerCAmelCase__ :Tuple=True , lowerCAmelCase__ :Optional[Any]=[] , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Union[str, Any]=None , **lowerCAmelCase__ :List[str] , ) -> str:
'''simple docstring'''
pl.seed_everything(args.seed )
# init model
_a = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase__ )
# add custom checkpoints
if checkpoint_callback is None:
_a = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase__ )
if logging_callback is None:
_a = LoggingCallback()
_a = {}
if args.fpaa:
_a = 16
if args.gpus > 1:
_a = 'auto'
_a = 'ddp'
_a = args.accumulate_grad_batches
_a = None
_a = 'auto'
_a = pl.Trainer.from_argparse_args(
lowerCAmelCase__ , weights_summary=lowerCAmelCase__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase__ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase__ , )
if args.do_train:
trainer.fit(lowerCAmelCase__ )
else:
print('RAG modeling tests with new set functions successfully executed!' )
return trainer
| 168 | 1 |
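configure_optimizers above splits parameters into two groups so that biases and LayerNorm weights receive no weight decay. A standalone sketch of that grouping with a toy module (the module and hyperparameters are illustrative, not taken from the example scripts):

import torch
from torch import nn

class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 4)
        self.LayerNorm = nn.LayerNorm(4)  # named to mirror transformer submodules

model = TinyModel()
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
print([len(g["params"]) for g in grouped_parameters])  # [1, 3]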
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
__lowercase = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
__lowercase = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
__lowercase = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthews Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
return float((preds == labels).mean() )
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="binary" ):
'''simple docstring'''
__UpperCamelCase :List[Any] = simple_accuracy(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average=SCREAMING_SNAKE_CASE ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[Any] = {}
for id_pred, label in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__UpperCamelCase :Optional[Any] = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
__UpperCamelCase :Dict = id_pred['''prediction''']
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
__UpperCamelCase :int = [(pred, label)]
__UpperCamelCase , __UpperCamelCase :int = [], []
for question, preds_labels in question_map.items():
__UpperCamelCase , __UpperCamelCase :List[Any] = zip(*SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=SCREAMING_SNAKE_CASE , average='''macro''' )
fas.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = int(sum(pred == label for pred, label in preds_labels ) == len(SCREAMING_SNAKE_CASE ) )
ems.append(SCREAMING_SNAKE_CASE )
__UpperCamelCase :List[str] = float(sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE ) )
__UpperCamelCase :List[Any] = sum(SCREAMING_SNAKE_CASE ) / len(SCREAMING_SNAKE_CASE )
__UpperCamelCase :Optional[int] = float(fa_score(y_true=SCREAMING_SNAKE_CASE , y_pred=[id_pred['''prediction'''] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase__ ( self) -> str:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
def UpperCamelCase__ ( self) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64'''),
"query": datasets.Value('''int64'''),
},
"prediction_text": datasets.Value('''string'''),
},
"references": {
"idx": {
"passage": datasets.Value('''int64'''),
"query": datasets.Value('''int64'''),
},
"answers": datasets.Sequence(datasets.Value('''string''')),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64'''),
"paragraph": datasets.Value('''int64'''),
"question": datasets.Value('''int64'''),
},
"prediction": datasets.Value('''int64'''),
},
"references": datasets.Value('''int64'''),
}
else:
return {
"predictions": datasets.Value('''int64'''),
"references": datasets.Value('''int64'''),
}
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> int:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__lowercase , __lowercase)}
elif self.config_name == "cb":
return acc_and_fa(__lowercase , __lowercase , fa_avg='''macro''')
elif self.config_name == "record":
__UpperCamelCase :Optional[Any] = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
__UpperCamelCase :Optional[int] = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(__lowercase , __lowercase)[0]
elif self.config_name == "multirc":
return evaluate_multirc(__lowercase , __lowercase)
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__lowercase , __lowercase)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''')
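A small sketch of the MultiRC aggregation implemented by evaluate_multirc above: answers are grouped per question, each question contributes a macro-F1 score and an exact-match flag, and both are averaged. The toy predictions below are made up for illustration:

from sklearn.metrics import f1_score

id_preds = [
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
    {"idx": {"paragraph": 0, "question": 1}, "prediction": 1},
]
labels = [1, 0, 1]

question_map = {}
for id_pred, label in zip(id_preds, labels):
    key = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
    question_map.setdefault(key, []).append((id_pred["prediction"], label))

f1s, ems = [], []
for preds_labels in question_map.values():
    preds, golds = zip(*preds_labels)
    f1s.append(f1_score(y_true=golds, y_pred=preds, average="macro"))
    ems.append(int(all(p == g for p, g in preds_labels)))
print({"exact_match": sum(ems) / len(ems), "f1_m": sum(f1s) / len(f1s)})  # both 1.0 here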
| 105 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__lowercase = logging.getLogger(__name__)
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : str = """token-classification"""
def __init__( self , __lowercase) -> str:
if type(__lowercase) == dict:
__UpperCamelCase :List[Any] = Namespace(**__lowercase)
__UpperCamelCase :Dict = import_module('''tasks''')
try:
__UpperCamelCase :str = getattr(__lowercase , hparams.task_type)
__UpperCamelCase :TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
__UpperCamelCase :Tuple = self.token_classification_task.get_labels(hparams.labels)
__UpperCamelCase :Tuple = CrossEntropyLoss().ignore_index
super().__init__(__lowercase , len(self.labels) , self.mode)
def UpperCamelCase__ ( self , **__lowercase) -> List[Any]:
return self.model(**__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Any:
__UpperCamelCase :str = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__UpperCamelCase :Union[str, Any] = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
__UpperCamelCase :Dict = self(**__lowercase)
__UpperCamelCase :str = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :List[Any] = self.hparams
for mode in ["train", "dev", "test"]:
__UpperCamelCase :int = self._feature_file(__lowercase)
if os.path.exists(__lowercase) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , __lowercase)
__UpperCamelCase :Any = torch.load(__lowercase)
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir)
__UpperCamelCase :Any = self.token_classification_task.read_examples_from_file(args.data_dir , __lowercase)
__UpperCamelCase :Union[str, Any] = self.token_classification_task.convert_examples_to_features(
__lowercase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet''']) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowercase , pad_on_left=bool(self.config.model_type in ['''xlnet''']) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , __lowercase)
torch.save(__lowercase , __lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = False) -> DataLoader:
__UpperCamelCase :Tuple = self._feature_file(__lowercase)
logger.info('''Loading features from cached file %s''' , __lowercase)
__UpperCamelCase :str = torch.load(__lowercase)
__UpperCamelCase :int = torch.tensor([f.input_ids for f in features] , dtype=torch.long)
__UpperCamelCase :Optional[Any] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long)
if features[0].token_type_ids is not None:
__UpperCamelCase :str = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long)
else:
__UpperCamelCase :Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long)
# HACK (we will not use this much longer)
__UpperCamelCase :int = torch.tensor([f.label_ids for f in features] , dtype=torch.long)
return DataLoader(
TensorDataset(__lowercase , __lowercase , __lowercase , __lowercase) , batch_size=__lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase) -> Dict:
"""Compute validation"""
__UpperCamelCase :int = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
__UpperCamelCase :Any = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
) # XLM and RoBERTa don't use token_type_ids
__UpperCamelCase :Any = self(**__lowercase)
__UpperCamelCase , __UpperCamelCase :Tuple = outputs[:2]
__UpperCamelCase :List[str] = logits.detach().cpu().numpy()
__UpperCamelCase :List[str] = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def UpperCamelCase__ ( self , __lowercase) -> List[str]:
__UpperCamelCase :Tuple = torch.stack([x['''val_loss'''] for x in outputs]).mean()
__UpperCamelCase :str = np.concatenate([x['''pred'''] for x in outputs] , axis=0)
__UpperCamelCase :Any = np.argmax(__lowercase , axis=2)
__UpperCamelCase :str = np.concatenate([x['''target'''] for x in outputs] , axis=0)
__UpperCamelCase :List[str] = dict(enumerate(self.labels))
__UpperCamelCase :Tuple = [[] for _ in range(out_label_ids.shape[0])]
__UpperCamelCase :Any = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
__UpperCamelCase :Any = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(__lowercase , __lowercase),
'''precision''': precision_score(__lowercase , __lowercase),
'''recall''': recall_score(__lowercase , __lowercase),
'''f1''': fa_score(__lowercase , __lowercase),
}
__UpperCamelCase :Dict = dict(results.items())
__UpperCamelCase :List[str] = results
return ret, preds_list, out_label_list
def UpperCamelCase__ ( self , __lowercase) -> int:
# when stable
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :List[Any] = self._eval_end(__lowercase)
__UpperCamelCase :Tuple = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def UpperCamelCase__ ( self , __lowercase) -> int:
# updating to test_epoch_end instead of deprecated test_end
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase :Optional[int] = self._eval_end(__lowercase)
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
__UpperCamelCase :Optional[Any] = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def UpperCamelCase__ ( __lowercase , __lowercase) -> Union[str, Any]:
# Add NER specific options
BaseTransformer.add_model_specific_args(__lowercase , __lowercase)
parser.add_argument(
'''--task_type''' , default='''NER''' , type=__lowercase , help='''Task type to fine tune in training (e.g. NER, POS, etc)''')
parser.add_argument(
'''--max_seq_length''' , default=128 , type=__lowercase , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=__lowercase , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=__lowercase , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''')
return parser
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__lowercase = NERTransformer.add_model_specific_args(parser, os.getcwd())
__lowercase = parser.parse_args()
__lowercase = NERTransformer(args)
__lowercase = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__lowercase = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__lowercase = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 105 | 1 |
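The label realignment in _eval_end above skips every position whose gold id equals the CrossEntropyLoss ignore_index before converting the padded id matrices back into per-token label strings. A self-contained sketch with toy data:

import numpy as np

pad_token_label_id = -100  # CrossEntropyLoss().ignore_index
label_map = {0: "O", 1: "B-PER", 2: "I-PER"}

out_label_ids = np.array([[1, 2, -100], [0, -100, -100]])
preds = np.array([[1, 2, 0], [0, 1, 2]])

out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
    for j in range(out_label_ids.shape[1]):
        if out_label_ids[i, j] != pad_token_label_id:
            out_label_list[i].append(label_map[int(out_label_ids[i, j])])
            preds_list[i].append(label_map[int(preds[i, j])])

print(out_label_list)  # [['B-PER', 'I-PER'], ['O']]
print(preds_list)      # [['B-PER', 'I-PER'], ['O']]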
'''simple docstring'''
import enum
import shutil
import sys
a , a : Union[str, Any] = shutil.get_terminal_size()
a : Union[str, Any] = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class UpperCamelCase_ ( enum.Enum ):
lowercase = 0
lowercase = 1
def __lowerCamelCase ( _lowercase , _lowercase="" ) -> Union[str, Any]:
sys.stdout.write(str(_lowercase ) + end )
sys.stdout.flush()
def __lowerCamelCase ( _lowercase , _lowercase , _lowercase="" ) -> List[str]:
forceWrite(F'''\u001b[{color}m{content}\u001b[0m''' , _lowercase )
def __lowerCamelCase ( ) -> Any:
forceWrite("""\r""" )
def __lowerCamelCase ( _lowercase , _lowercase ) -> int:
forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''' )
def __lowerCamelCase ( ) -> Optional[int]:
forceWrite(""" """ * TERMINAL_WIDTH )
reset_cursor()
def __lowerCamelCase ( ) -> List[Any]:
reset_cursor()
forceWrite("""-""" * TERMINAL_WIDTH )
| 265 |
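The helpers above are thin wrappers over ANSI escape sequences: "\u001b[<n>m" selects a color, "\033[<k><A-D>" moves the cursor, and "\r" returns to the start of the line. A quick demonstration, assuming an ANSI-capable terminal:

import sys

sys.stdout.write("\u001b[32mgreen text\u001b[0m\n")  # color on, text, reset
sys.stdout.write("this gets overwritten\r")
sys.stdout.write("this wins the line   \n")  # the \r above let us overwrite in place
sys.stdout.write("\033[1A")                  # move the cursor up one line
sys.stdout.flush()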
'''simple docstring'''
from itertools import count
def __lowerCamelCase ( _lowercase = 5_0 ) -> int:
UpperCAmelCase : Any = [1] * min_block_length
for n in count(_lowercase ):
fill_count_functions.append(1 )
for block_length in range(_lowercase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_0_0_0_0_0_0:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
| 265 | 1 |
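The loop above computes the Project Euler style fill-count function F(m, n): the number of ways to fill a row of n units with black blocks of length at least m, any two blocks separated by at least one red square. An equivalent top-down formulation, shown for clarity: each row is either all red, or its first block has some length L >= m and start position s, followed by one mandatory red square and an independent sub-problem on the remainder.

from functools import lru_cache

@lru_cache(maxsize=None)
def ways(n: int, m: int) -> int:
    if n <= 0:
        return 1  # nothing left to fill (block ended flush with the row): one way
    total = 1  # the all-red row
    for block_length in range(m, n + 1):
        for block_start in range(n - block_length + 1):
            total += ways(n - block_start - block_length - 1, m)
    return total

print(ways(7, 3))  # 17, the F(3, 7) value quoted in Project Euler 114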
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_UpperCAmelCase = logging.get_logger(__name__)
class snake_case_ ( __lowercase ):
def __init__( self : List[str] , *_snake_case : List[Any] , **_snake_case : int )->None:
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , _snake_case , )
super().__init__(*_snake_case , **_snake_case )
| 232 |
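The shim above follows the standard deprecation pattern: subclass the replacement, warn on construction, and delegate everything else. A generic sketch of that pattern (the class names here are made up for illustration):

import warnings

class NewProcessor:
    def __init__(self, size: int = 224):
        self.size = size

class OldProcessor(NewProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

proc = OldProcessor(size=384)  # warns, then behaves exactly like NewProcessor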
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class snake_case_ ( __lowercase ):
A_ = 'unispeech-sat'
def __init__( self : str , _snake_case : List[Any]=32 , _snake_case : Union[str, Any]=768 , _snake_case : Tuple=12 , _snake_case : Optional[int]=12 , _snake_case : Optional[Any]=3072 , _snake_case : Tuple="gelu" , _snake_case : int=0.1 , _snake_case : List[Any]=0.1 , _snake_case : Union[str, Any]=0.1 , _snake_case : str=0.0 , _snake_case : List[str]=0.0 , _snake_case : int=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : Optional[Any]=0.02 , _snake_case : int=1E-5 , _snake_case : Dict="group" , _snake_case : Optional[Any]="gelu" , _snake_case : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , _snake_case : int=(5, 2, 2, 2, 2, 2, 2) , _snake_case : int=(10, 3, 3, 3, 3, 2, 2) , _snake_case : Any=False , _snake_case : Optional[Any]=128 , _snake_case : Tuple=16 , _snake_case : str=False , _snake_case : Dict=True , _snake_case : Tuple=0.05 , _snake_case : str=10 , _snake_case : Tuple=2 , _snake_case : List[Any]=0.0 , _snake_case : str=10 , _snake_case : Any=0 , _snake_case : List[Any]=320 , _snake_case : Union[str, Any]=2 , _snake_case : Dict=0.1 , _snake_case : Dict=100 , _snake_case : Union[str, Any]=256 , _snake_case : int=256 , _snake_case : Union[str, Any]=0.1 , _snake_case : Optional[Any]="mean" , _snake_case : int=False , _snake_case : str=False , _snake_case : str=256 , _snake_case : List[Any]=(512, 512, 512, 512, 1500) , _snake_case : Optional[int]=(5, 3, 3, 1, 1) , _snake_case : Tuple=(1, 2, 3, 1, 1) , _snake_case : Dict=512 , _snake_case : Union[str, Any]=0 , _snake_case : List[str]=1 , _snake_case : Optional[Any]=2 , _snake_case : Optional[int]=504 , **_snake_case : Optional[int] , )->Union[str, Any]:
'''simple docstring'''
super().__init__(**_snake_case , pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case )
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : List[Any] = feat_extract_norm
__lowerCAmelCase : int = feat_extract_activation
__lowerCAmelCase : Union[str, Any] = list(_snake_case )
__lowerCAmelCase : str = list(_snake_case )
__lowerCAmelCase : Optional[Any] = list(_snake_case )
__lowerCAmelCase : Optional[int] = conv_bias
__lowerCAmelCase : Dict = num_conv_pos_embeddings
__lowerCAmelCase : List[Any] = num_conv_pos_embedding_groups
__lowerCAmelCase : Tuple = len(self.conv_dim )
__lowerCAmelCase : int = num_hidden_layers
__lowerCAmelCase : str = intermediate_size
__lowerCAmelCase : str = hidden_act
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : Optional[int] = hidden_dropout
__lowerCAmelCase : str = attention_dropout
__lowerCAmelCase : int = activation_dropout
__lowerCAmelCase : Union[str, Any] = feat_proj_dropout
__lowerCAmelCase : List[str] = final_dropout
__lowerCAmelCase : Dict = layerdrop
__lowerCAmelCase : Tuple = layer_norm_eps
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : str = vocab_size
__lowerCAmelCase : Optional[int] = num_clusters
__lowerCAmelCase : List[Any] = do_stable_layer_norm
__lowerCAmelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__lowerCAmelCase : Dict = apply_spec_augment
__lowerCAmelCase : List[Any] = mask_time_prob
__lowerCAmelCase : List[str] = mask_time_length
__lowerCAmelCase : Dict = mask_time_min_masks
__lowerCAmelCase : Tuple = mask_feature_prob
__lowerCAmelCase : List[str] = mask_feature_length
__lowerCAmelCase : str = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__lowerCAmelCase : Optional[int] = num_codevectors_per_group
__lowerCAmelCase : List[Any] = num_codevector_groups
__lowerCAmelCase : int = contrastive_logits_temperature
__lowerCAmelCase : str = feat_quantizer_dropout
__lowerCAmelCase : int = num_negatives
__lowerCAmelCase : str = codevector_dim
__lowerCAmelCase : Any = proj_codevector_dim
__lowerCAmelCase : Any = diversity_loss_weight
# ctc loss
__lowerCAmelCase : Tuple = ctc_loss_reduction
__lowerCAmelCase : Any = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__lowerCAmelCase : Any = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__lowerCAmelCase : List[str] = list(_snake_case )
__lowerCAmelCase : List[str] = list(_snake_case )
__lowerCAmelCase : Optional[int] = list(_snake_case )
__lowerCAmelCase : Optional[int] = xvector_output_dim
@property
def UpperCAmelCase__ ( self : Optional[Any] )->Any:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 232 | 1 |
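The property above multiplies the convolutional strides of the feature extractor together; for the default stride tuple this gives the factor by which raw audio is downsampled into feature frames. Standalone illustration:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default above
downsampling_factor = functools.reduce(operator.mul, conv_stride, 1)
print(downsampling_factor)  # 320: one feature frame per 320 input samples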
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
a_ = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class UpperCAmelCase_ :
UpperCamelCase =42
UpperCamelCase =None
UpperCamelCase =None
UpperCamelCase =None
UpperCamelCase =None
def _lowerCamelCase ( self ) -> Any:
__lowercase ,__lowercase ,__lowercase : Tuple = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> str:
return F"""{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"""
@property
def _lowerCamelCase ( self ) -> Tuple:
return self.major, self.minor, self.patch
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict:
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return Version(UpperCamelCase_ )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
return other
raise TypeError(F"""{other} (type {type(UpperCamelCase_ )}) cannot be compared to version.""" )
def __eq__( self , UpperCamelCase_ ) -> Any:
try:
__lowercase : str = self._validate_operand(UpperCamelCase_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , UpperCamelCase_ ) -> Optional[Any]:
__lowercase : List[Any] = self._validate_operand(UpperCamelCase_ )
return self.tuple < other.tuple
def __hash__( self ) -> Dict:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def _lowerCamelCase ( cls , UpperCamelCase_ ) -> Dict:
__lowercase : str = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def _lowerCamelCase ( self ) -> str:
return self.version_str
def __UpperCAmelCase ( __UpperCamelCase ):
__lowercase : int = _VERSION_REG.match(__UpperCamelCase )
if not res:
raise ValueError(f"""Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.""" )
return tuple(int(__UpperCamelCase ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def __UpperCAmelCase ( __UpperCamelCase ):
return ".".join(str(__UpperCamelCase ) for v in version_tuple )
| 249 |
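A standalone sketch of the parsing that the regex above performs and the int-tuple comparison it enables (the helper name below is illustrative; the mangled method names in the snippet stand in for __post_init__ and friends):

import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def str_to_version_tuple(version_str: str) -> tuple:
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))

assert str_to_version_tuple("1.10.0") > str_to_version_tuple("1.9.2")  # tuple, not string, comparison
print(str_to_version_tuple("2.3.4"))  # (2, 3, 4)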
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=18 , UpperCamelCase_=30 , UpperCamelCase_=4_00 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , ) -> List[Any]:
__lowercase : Any = size if size is not None else {'''height''': 18, '''width''': 18}
__lowercase : Dict = parent
__lowercase : Dict = batch_size
__lowercase : int = num_channels
__lowercase : Union[str, Any] = image_size
__lowercase : Optional[int] = min_resolution
__lowercase : List[str] = max_resolution
__lowercase : Dict = do_resize
__lowercase : Any = size
__lowercase : Any = do_normalize
__lowercase : int = image_mean
__lowercase : Tuple = image_std
def _lowerCamelCase ( self ) -> Tuple:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( snake_case , unittest.TestCase ):
UpperCamelCase =DPTImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = DPTImageProcessingTester(self )
@property
def _lowerCamelCase ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
__lowercase : Tuple = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def _lowerCamelCase ( self ) -> Optional[int]:
# Initialize image_processing
__lowercase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowercase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__lowercase : Optional[Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowerCamelCase ( self ) -> List[Any]:
# Initialize image_processing
__lowercase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__lowercase : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__lowercase : Any = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def _lowerCamelCase ( self ) -> Tuple:
# Initialize image_processing
__lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__lowercase : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__lowercase : str = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 249 | 1 |
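The tests above rely on prepare_image_inputs from the shared test mixin. A minimal stand-in that produces the same three input flavors (PIL, numpy, torch) from random data, with illustrative shapes:

import numpy as np
import torch
from PIL import Image

def make_image_inputs(batch_size=7, num_channels=3, size=30, kind="pil"):
    arrays = [
        np.random.randint(0, 256, (size, size, num_channels), dtype=np.uint8)
        for _ in range(batch_size)
    ]
    if kind == "pil":
        return [Image.fromarray(a) for a in arrays]
    if kind == "numpy":
        return arrays
    return [torch.from_numpy(a.transpose(2, 0, 1)) for a in arrays]  # channels-first for torch

print(type(make_image_inputs(kind="pil")[0]))    # <class 'PIL.Image.Image'>
print(make_image_inputs(kind="torch")[0].shape)  # torch.Size([3, 30, 30])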
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCamelCase ( __lowercase ):
'''simple docstring'''
lowercase : Optional[Any] =(KDPMaDiscreteScheduler,)
lowercase : Tuple =10
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Optional[int] = {
'''num_train_timesteps''': 1100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_a )
return config
def UpperCamelCase ( self ):
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def UpperCamelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def UpperCamelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def UpperCamelCase ( self ):
lowercase_ :str = self.scheduler_classes[0]
lowercase_ :Dict = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowercase_ :List[str] = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
lowercase_ :str = self.dummy_model()
lowercase_ :Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase_ :List[str] = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ :Any = scheduler.scale_model_input(_a , _a )
lowercase_ :Any = model(_a , _a )
lowercase_ :Union[str, Any] = scheduler.step(_a , _a , _a )
lowercase_ :Any = output.prev_sample
lowercase_ :List[Any] = torch.sum(torch.abs(_a ) )
lowercase_ :Any = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 4.693_4286_5017_0972E-07 ) < 1E-2
assert abs(result_mean.item() - 0.0002 ) < 1E-3
def UpperCamelCase ( self ):
if torch_device == "mps":
return
lowercase_ :List[Any] = self.scheduler_classes[0]
lowercase_ :Optional[int] = self.get_scheduler_config()
lowercase_ :str = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
lowercase_ :Any = self.dummy_model()
lowercase_ :int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase_ :Union[str, Any] = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
lowercase_ :Tuple = scheduler.scale_model_input(_a , _a )
lowercase_ :List[str] = model(_a , _a )
lowercase_ :str = scheduler.step(_a , _a , _a )
lowercase_ :str = output.prev_sample
lowercase_ :int = torch.sum(torch.abs(_a ) )
lowercase_ :Union[str, Any] = torch.mean(torch.abs(_a ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125 ) < 1E-2
assert abs(result_mean.item() - 0.0266 ) < 1E-3
    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
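# A minimal standalone sketch of the denoising loop the tests above exercise,
# written against the public diffusers API. The "model" here is a no-op
# stand-in for a trained UNet, so the outputs are meaningless; this only
# illustrates the scale_model_input -> model -> step cycle (shapes and the
# timestep count are illustrative assumptions).
def _kdpm2_loop_sketch():
    import torch
    from diffusers import KDPM2DiscreteScheduler

    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample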
| 350 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowercase : Optional[Any] ="""google/pegasus-xsum"""
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
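# Standalone usage sketch, outside the test harness above. Downloads the
# pegasus-xsum weights on first use and needs TensorFlow installed; the beam
# count mirrors the integration test, but all other settings are illustrative.
def _pegasus_summarize(texts):
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

    tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tokenizer(texts, truncation=True, padding=True, return_tensors="tf")
    summary_ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    return tokenizer.batch_decode(summary_ids.numpy(), skip_special_tokens=True)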
| 252 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after DETR's shortest-edge resize."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "rescale_factor"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
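# Standalone sketch of the shortest-edge resize rule the tester above mirrors,
# in pure Python. The longest-edge cap is an assumption carried over from the
# full image processor; the minimal tester omits it.
def detr_resize_shape(height, width, shortest_edge=800, longest_edge=1333):
    # Follows the tester's arithmetic: scale so min(h, w) hits shortest_edge,
    # truncating with int() rather than rounding.
    if max(height, width) / min(height, width) * shortest_edge > longest_edge:
        shortest_edge = int(longest_edge * min(height, width) / max(height, width))
    if height <= width:
        return shortest_edge, int(shortest_edge * width / height)
    return int(shortest_edge * height / width), shortest_edge


# detr_resize_shape(480, 640) -> (800, 1066), matching the slow-test shapes above.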
| 306 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 306 | 1 |
import os
SCREAMING_SNAKE_CASE__ : Dict = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings
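# Quick sanity check for the two helpers above (pure Python, no input file):
# generate/parse round-trip to the identity, and a non-minimal numeral such as
# "XIIII" canonicalises to the shorter "XIV".
def _roman_sanity_check() -> None:
    assert parse_roman_numerals("MCMXCIV") == 1994
    assert generate_roman_numerals(1994) == "MCMXCIV"
    assert generate_roman_numerals(parse_roman_numerals("XIIII")) == "XIV"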
if __name__ == "__main__":
print(F'{solution() = }')
| 359 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg name is kept for backwards compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
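# A short demonstration of the validation above (assumes the class compiles as
# written): a well-formed rope_scaling dict is accepted, an unknown type raises.
def _rope_scaling_demo() -> None:
    config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    assert config.rope_scaling["factor"] == 2.0
    try:
        OpenLlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
    except ValueError as err:
        print(f"rejected as expected: {err}")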
| 339 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
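# Example invocation (file names are placeholders; the checkpoint is expected
# to contain a "model_state_dict" entry, as loaded above):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path yoso.ckpt \
#       --config_file yoso_config.json \
#       --pytorch_dump_path ./yoso-converted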
| 105 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )
    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 105 | 1 |
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError("You cannot supply more or less than 2 values")
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative")
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative")
elif mobility < 0:
raise ValueError("mobility cannot be negative")
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
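# Worked example for the relation sigma = n * q * mu encoded above. Passing 0
# marks a quantity as the unknown to solve for; the magnitudes are illustrative
# only (roughly a doped semiconductor, SI units).
def _carrier_concentration_demo() -> None:
    name, value = carrier_concentration(conductivity=0, electron_conc=1e18, mobility=0.14)
    print(name, value)  # -> conductivity 0.0224294 (S/m), i.e. 1e18 * 0.14 * 1.6021e-19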
| 354 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
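# Illustrative, simplified sketch of what _LazyModule does (not the transformers
# implementation): attribute access triggers the real submodule import on first
# touch, so torch/vision are only loaded when actually needed.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, exported_names in self._import_structure.items():
            if attr in exported_names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")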
| 333 | 0 |
import argparse
import json
from tqdm import tqdm
def main() -> None:
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        "--src_path",
        type=str,
        default="biencoder-nq-dev.json",
        help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set",
        type=str,
        help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path",
        type=str,
        help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w"
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")
if __name__ == "__main__":
    main()
| 232 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 232 | 1 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
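# Worked example of the sizing rule above: a 480x640 (height x width) image fit
# to (384, 384) with keep_aspect_ratio=True and multiple=32. scale_height is
# 0.8 and scale_width is 0.6; the height scale deviates less from 1, so both
# sides use 0.8 and are then snapped to multiples of 32, giving (384, 512).
# The dummy array below assumes the channels-first layout get_image_size infers.
def _resize_rule_demo() -> None:
    dummy = np.zeros((3, 480, 640), dtype=np.uint8)
    assert get_resize_output_image_size(dummy, (384, 384), True, 32) == (384, 512)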
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
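# A minimal usage sketch for the post-processing above (the model/processor
# variables are hypothetical, not defined in this file):
#
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   seg_maps = processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )
#   # seg_maps[0] is a (height, width) tensor of per-pixel class indices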
| 14 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
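# The module above follows the standard lazy-import layout: `_import_structure`
# maps submodule names to their public symbols, the `if TYPE_CHECKING:` branch
# performs real imports so static type checkers can see them, and at runtime
# the module object is swapped for a `_LazyModule` that defers each submodule
# import until one of its attributes is first accessed.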
| 16 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 252 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 352 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003
def rabin_karp(pattern: str, text: str) -> bool:
    """
    Returns True if `pattern` occurs as a substring of `text`,
    using the Rabin-Karp rolling-hash algorithm.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
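# The update above is the standard rolling hash: a window of length p_len is
# read as a base-`alphabet_size` number modulo `modulus`, so sliding the window
# by one character removes the leading digit and appends a new one:
#   new_hash = ((old_hash - ord(text[i]) * alphabet_size**(p_len - 1))
#               * alphabet_size + ord(text[i + p_len])) % modulus
# where alphabet_size**(p_len - 1) is the precomputed `modulus_power`.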
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
| 44 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge"""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
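# fire exposes the function's signature on the command line, so this script
# can be invoked as, e.g. (file names here are hypothetical):
#   python rouge_cli.py predictions.txt references.txt --save_path metrics.json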
| 75 |
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 339 | 0 |
"""simple docstring"""
def pigeonhole_sort(a: list) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))


if __name__ == "__main__":
    main()
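# Pigeonhole sort runs in O(n + N) time and O(N) extra space, where
# N = max(a) - min(a) + 1; it only pays off when the value range is not much
# larger than the number of elements to sort.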
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 11 | 0 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels

        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings

        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )

        return (config, input_ids, attention_mask, lm_labels)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        attention_mask,
        lm_labels,
    ):
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]

        input_ids[input_ids == 0] += 1
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class TrOCRStandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False
    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    def test_inputs_embeds(self):
        pass

    def test_save_load_fast_init_from_base(self):
        pass

    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 92 |
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
A_ : List[str] = sys.version_info >= (3, 10)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BasicExample:
    foo: int
    bar: float
    baz: str
    flag: bool


@dataclass
class WithDefaultExample:
    foo: int = 42
    baz: str = field(default="toto", metadata={"help": "help message"})


@dataclass
class WithDefaultBoolExample:
    foo: bool = False
    baz: bool = True
    opt: Optional[bool] = None


class BasicEnum(Enum):
    titi = "titi"
    toto = "toto"


class MixedTypeEnum(Enum):
    titi = "titi"
    toto = "toto"
    fourtytwo = 42


@dataclass
class EnumExample:
    foo: BasicEnum = "toto"

    def __post_init__(self):
        self.foo = BasicEnum(self.foo)


@dataclass
class MixedTypeEnumExample:
    foo: MixedTypeEnum = "toto"

    def __post_init__(self):
        self.foo = MixedTypeEnum(self.foo)


@dataclass
class OptionalExample:
    foo: Optional[int] = None
    bar: Optional[float] = field(default=None, metadata={"help": "help message"})
    baz: Optional[str] = None
    ces: Optional[List[str]] = list_field(default=[])
    des: Optional[List[int]] = list_field(default=[])


@dataclass
class ListExample:
    foo_int: List[int] = list_field(default=[])
    bar_int: List[int] = list_field(default=[1, 2, 3])
    foo_str: List[str] = list_field(default=["Hallo", "Bonjour", "Hello"])
    foo_float: List[float] = list_field(default=[0.1, 0.2, 0.3])


@dataclass
class RequiredExample:
    required_list: List[int] = field()
    required_str: str = field()
    required_enum: BasicEnum = field()

    def __post_init__(self):
        self.required_enum = BasicEnum(self.required_enum)


@dataclass
class StringLiteralAnnotationExample:
    foo: "int"
    required_enum: "BasicEnum" = field()
    opt: "Optional[bool]" = None
    baz: "str" = field(default="toto", metadata={"help": "help message"})
    foo_str: "List[str]" = list_field(default=["Hallo", "Bonjour", "Hello"])


if is_python_no_less_than_3_10:

    @dataclass
    class WithDefaultBoolExamplePep604:
        foo: bool = False
        baz: bool = True
        opt: bool | None = None

    @dataclass
    class OptionalExamplePep604:
        foo: int | None = None
        bar: float | None = field(default=None, metadata={"help": "help message"})
        baz: str | None = None
        ces: list[str] | None = list_field(default=[])
        des: list[int] | None = list_field(default=[])
class HfArgumentParserTest(unittest.TestCase):
    def argparsersEqual(self, a: argparse.ArgumentParser, b: argparse.ArgumentParser):
        """
        Small helper to check pseudo-equality of parsed arguments on `ArgumentParser` instances.
        """
        self.assertEqual(len(a._actions), len(b._actions))
        for x, y in zip(a._actions, b._actions):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}

            # Choices with mixed type have custom function as "type"
            # So we need to compare results directly for equality
            if xx.get("choices", None) and yy.get("choices", None):
                for expected_choice in yy["choices"] + xx["choices"]:
                    self.assertEqual(xx["type"](expected_choice), yy["type"](expected_choice))
                del xx["type"], yy["type"]

            self.assertEqual(xx, yy)
    def test_basic(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--bar''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--baz''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--flag''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = ['''--foo''', '''1''', '''--baz''', '''quux''', '''--bar''', '''0.5''']
((__UpperCAmelCase) , ) = parser.parse_args_into_dataclasses(lowercase__ , look_for_args_file=lowercase__ )
self.assertFalse(example.flag )
    def test_with_default(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=42 , type=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
self.argparsersEqual(lowercase__ , lowercase__ )
    def test_with_default_bool(self):
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
expected.add_argument('''--baz''' , type=lowercase__ , default=lowercase__ , const=lowercase__ , nargs='''?''' )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument('''--no_baz''' , action='''store_false''' , default=lowercase__ , dest='''baz''' )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
__UpperCAmelCase = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--no_baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''--baz'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''True''', '''--baz''', '''True''', '''--opt''', '''True'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''False''', '''--baz''', '''False''', '''--opt''', '''False'''] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , baz=lowercase__ , opt=lowercase__ ) )
    def test_with_enum(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=['''titi''', '''toto''', 42] , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''titi'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
__UpperCAmelCase = parser.parse_args_into_dataclasses(['''--foo''', '''42'''] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
    def test_with_literal(self):
        @dataclass
        class WithLiteralExample:
            foo: Literal["titi", "toto", 42] = "toto"
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument(
'''--foo''' , default='''toto''' , choices=('''titi''', '''toto''', 42) , type=make_choice_type_function(['''titi''', '''toto''', 42] ) , )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(args.foo , '''toto''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''titi'''] )
self.assertEqual(args.foo , '''titi''' )
__UpperCAmelCase = parser.parse_args(['''--foo''', '''42'''] )
self.assertEqual(args.foo , 42 )
    def test_with_list(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo_int''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--bar_int''' , nargs='''+''' , default=[1, 2, 3] , type=lowercase__ )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
expected.add_argument('''--foo_float''' , nargs='''+''' , default=[0.1, 0.2, 0.3] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(
lowercase__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=['''Hallo''', '''Bonjour''', '''Hello'''] , foo_float=[0.1, 0.2, 0.3] ) , )
__UpperCAmelCase = parser.parse_args('''--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7'''.split() )
self.assertEqual(lowercase__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=['''a''', '''b''', '''c'''] , foo_float=[0.1, 0.7] ) )
    def test_with_optional(self):
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--bar''' , default=lowercase__ , type=lowercase__ , help='''help message''' )
expected.add_argument('''--baz''' , default=lowercase__ , type=lowercase__ )
expected.add_argument('''--ces''' , nargs='''+''' , default=[] , type=lowercase__ )
expected.add_argument('''--des''' , nargs='''+''' , default=[] , type=lowercase__ )
__UpperCAmelCase = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(lowercase__ )
for dataclass_type in dataclass_types:
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_args([] )
self.assertEqual(lowercase__ , Namespace(foo=lowercase__ , bar=lowercase__ , baz=lowercase__ , ces=[] , des=[] ) )
__UpperCAmelCase = parser.parse_args('''--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3'''.split() )
self.assertEqual(lowercase__ , Namespace(foo=12 , bar=3.14 , baz='''42''' , ces=['''a''', '''b''', '''c'''] , des=[1, 2, 3] ) )
    def test_required_args(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--required_list''' , nargs='''+''' , type=lowercase__ , required=lowercase__ )
expected.add_argument('''--required_str''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
self.argparsersEqual(lowercase__ , lowercase__ )
    def test_with_string_literal_annotation(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = argparse.ArgumentParser()
expected.add_argument('''--foo''' , type=lowercase__ , required=lowercase__ )
expected.add_argument(
'''--required_enum''' , type=make_choice_type_function(['''titi''', '''toto'''] ) , choices=['''titi''', '''toto'''] , required=lowercase__ , )
expected.add_argument('''--opt''' , type=lowercase__ , default=lowercase__ )
expected.add_argument('''--baz''' , default='''toto''' , type=lowercase__ , help='''help message''' )
expected.add_argument('''--foo_str''' , nargs='''+''' , default=['''Hallo''', '''Bonjour''', '''Hello'''] , type=lowercase__ )
self.argparsersEqual(lowercase__ , lowercase__ )
    def test_parse_dict(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
__UpperCAmelCase = parser.parse_dict(lowercase__ )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
    def test_parse_dict_extra_key(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
'''extra''': 42,
}
self.assertRaises(lowercase__ , parser.parse_dict , lowercase__ , allow_extra_keys=lowercase__ )
    def test_parse_json(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_json''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.json''' , '''w+''' ) as f:
json.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.json''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
    def test_parse_yaml(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
__UpperCAmelCase = {
'''foo''': 12,
'''bar''': 3.14,
'''baz''': '''42''',
'''flag''': True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
__UpperCAmelCase = os.path.join(lowercase__ , '''temp_yaml''' )
os.mkdir(lowercase__ )
with open(temp_local_path + '''.yaml''' , '''w+''' ) as f:
yaml.dump(lowercase__ , lowercase__ )
__UpperCAmelCase = parser.parse_yaml_file(Path(temp_local_path + '''.yaml''' ) )[0]
__UpperCAmelCase = BasicExample(**lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
    def test_integration_training_args(self):
__UpperCAmelCase = HfArgumentParser(lowercase__ )
self.assertIsNotNone(lowercase__ )
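# A minimal sketch of the HfArgumentParser flow the tests above exercise
# (the dataclass and values below are illustrative only):
#
#   @dataclass
#   class Args:
#       foo: int
#       bar: float = 3.14
#
#   (args,) = HfArgumentParser(Args).parse_args_into_dataclasses(["--foo", "12"])
#   assert args.foo == 12 and args.bar == 3.14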
| 333 | 0 |
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
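# Composition identities used above (all rotations counterclockwise):
#   rotate_90(m)  == reverse_row(transpose(m))
#   rotate_180(m) == reverse_row(reverse_column(m))
#   rotate_270(m) == reverse_column(transpose(m))
# transpose flips across the main diagonal, reverse_row flips the matrix
# vertically, and reverse_column flips it horizontally.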
| 359 |
def heaps(arr: list) -> list:
    """
    Iterative Heap's algorithm: returns a list of all permutations of `arr`,
    each produced from the previous one by a single swap.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
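# Heap's algorithm emits all n! permutations; the counter array `c` plays the
# role of the loop variables of the recursive formulation, so this variant is
# fully iterative and performs exactly one swap per generated permutation.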
if __name__ == "__main__":
UpperCamelCase = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 65 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
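# Deprecation shim pattern: the legacy *FeatureExtractor name is kept as a
# thin subclass of the new *ImageProcessor so that existing imports keep
# working while a warning is emitted at construction time.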
| 14 |
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
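# A rough sketch of the kind of mask TopKBinarizer is expected to produce
# (a hypothetical helper for illustration, not the emmental implementation):
# keep the top `threshold` fraction of scores and zero out the rest.
def _topk_mask_sketch(scores: "torch.Tensor", threshold: float) -> "torch.Tensor":
    k = max(1, int(threshold * scores.numel()))  # number of weights kept
    mask = torch.zeros_like(scores)
    _, top_idx = scores.flatten().topk(k)  # indices of the k largest scores
    mask.view(-1)[top_idx] = 1.0
    return mask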
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
    args = parser.parse_args()
main(args)
| 14 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
a_ = DanceDiffusionPipeline
a_ = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
a_ = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
a_ = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
a_ = False
a_ = False
def lowercase ( self : List[Any] ) -> Dict:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=lowerCAmelCase_ , use_timestep_embedding=lowerCAmelCase_ , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
__lowerCAmelCase = IPNDMScheduler()
__lowerCAmelCase = {
'unet': unet,
'scheduler': scheduler,
}
return components
def lowercase ( self : Tuple , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str]=0 ) -> Any:
if str(lowerCAmelCase_ ).startswith('mps' ):
__lowerCAmelCase = torch.manual_seed(lowerCAmelCase_ )
else:
__lowerCAmelCase = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
__lowerCAmelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def lowercase ( self : Union[str, Any] ) -> int:
__lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = DanceDiffusionPipeline(**lowerCAmelCase_ )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = self.get_dummy_inputs(lowerCAmelCase_ )
__lowerCAmelCase = pipe(**lowerCAmelCase_ )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
__lowerCAmelCase = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase ( self : Union[str, Any] ) -> Tuple:
return super().test_save_load_local()
@skip_mps
def lowercase ( self : List[str] ) -> Dict:
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def lowercase ( self : str ) -> List[str]:
return super().test_save_load_optional_components()
@skip_mps
def lowercase ( self : List[Any] ) -> List[str]:
return super().test_attention_slicing_forward_pass()
def lowercase ( self : str ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Any ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str] ) -> List[str]:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase ( self : Tuple ) -> Dict:
__lowerCAmelCase = torch_device
__lowerCAmelCase = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
__lowerCAmelCase = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = pipe(generator=lowerCAmelCase_ , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
__lowerCAmelCase = output.audios
__lowerCAmelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
__lowerCAmelCase = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 207 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_snake_case : Union[str, Any] = False
try:
_snake_case : Tuple = _is_package_available('google.colab')
except ModuleNotFoundError:
pass
@input.register
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase_ : str = None , lowerCAmelCase_ : list = [] ) -> Optional[int]:
__lowerCAmelCase = 0
__lowerCAmelCase = choices
__lowerCAmelCase = prompt
if sys.platform == "win32":
__lowerCAmelCase = '*'
else:
__lowerCAmelCase = '➔ '
def lowercase ( self : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : str = "" ) -> Any:
if sys.platform != "win32":
writeColor(self.choices[index] , 3_2 , lowerCAmelCase_ )
else:
forceWrite(self.choices[index] , lowerCAmelCase_ )
def lowercase ( self : List[Any] , lowerCAmelCase_ : int ) -> List[Any]:
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(lowerCAmelCase_ )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def lowercase ( self : Dict , lowerCAmelCase_ : Direction , lowerCAmelCase_ : int = 1 ) -> Union[str, Any]:
__lowerCAmelCase = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(lowerCAmelCase_ )
move_cursor(lowerCAmelCase_ , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP['up'] )
def lowercase ( self : Optional[int] ) -> Tuple:
self.move_direction(Direction.UP )
@input.mark(KEYMAP['down'] )
def lowercase ( self : str ) -> Optional[Any]:
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP['newline'] )
def lowercase ( self : str ) -> Any:
move_cursor(len(self.choices ) - self.position , 'DOWN' )
return self.position
@input.mark(KEYMAP['interrupt'] )
def lowercase ( self : Dict ) -> Optional[Any]:
move_cursor(len(self.choices ) - self.position , 'DOWN' )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(lowerCAmelCase_ )] for number in range(1_0 )] )
def lowercase ( self : List[Any] ) -> Optional[int]:
__lowerCAmelCase = int(chr(self.current_selection ) )
__lowerCAmelCase = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , lowerCAmelCase_ )
else:
return
else:
return
def lowercase ( self : Any , lowerCAmelCase_ : int = 0 ) -> int:
if self.prompt:
linebreak()
forceWrite(self.prompt , '\n' )
if in_colab:
forceWrite('Please input a choice index (starting from 0), and press enter' , '\n' )
else:
forceWrite('Please select a choice using the arrow or number keys, and selecting with enter' , '\n' )
__lowerCAmelCase = default_choice
for i in range(len(self.choices ) ):
self.print_choice(lowerCAmelCase_ )
forceWrite('\n' )
move_cursor(len(self.choices ) - self.position , 'UP' )
with cursor.hide():
while True:
if in_colab:
try:
__lowerCAmelCase = int(builtins.input() )
except ValueError:
__lowerCAmelCase = default_choice
else:
__lowerCAmelCase = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , 'UP' )
clear_line()
self.write_choice(lowerCAmelCase_ , '\n' )
return choice
| 207 | 1 |
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
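# A pinned-dependency table like `deps` above is typically consumed when
# building setup() requirements. A minimal sketch (the helper name and usage
# are assumptions, mirroring the convention in similar Hugging Face repos):
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]

# e.g. deps_list("torch", "transformers") -> ["torch>=1.4", "transformers>=4.25.1"]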
| 195 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
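# Typical use of the image-to-text pipeline implemented above, via the
# high-level `pipeline` factory (the checkpoint name is an illustrative
# assumption; any GIT / Pix2Struct / vision-encoder-decoder checkpoint with an
# image-to-text head should work):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="microsoft/git-base")
#     captioner("photo.jpg")  # -> [{"generated_text": "..."}]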
| 44 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2_048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 116 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the smallest
    term for which c > 10**k, when terms are written as a(i) = b * 10**k + c.
    """
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)
    if sub_memo is not None:
        jumps = sub_memo.get(c)
        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped
            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]
    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1
    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i, k, i, n):
    """
    Same as next_term, but computes terms sequentially.
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """
    Adds `addend` to the digit array `digits`, starting at index k.
    """
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10
        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """
    Returns the n-th term of the sequence.
    """
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n
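def solution_brute_force(n: int = 20) -> int:
    # Tiny brute-force reference for the digit-sum recurrence handled above,
    # a(i+1) = a(i) + digitsum(a(i)) with a(1) = 1. Helper added for
    # illustration; useful to cross-check `solution` on small n (verify the
    # off-by-one indexing convention before relying on it).
    a = 1
    for _ in range(n - 1):
        a += sum(int(digit) for digit in str(a))
    return a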
if __name__ == "__main__":
    print(f"{solution() = }")
| 116 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Dict = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
_UpperCAmelCase : Tuple = '''codegen'''
_UpperCAmelCase : List[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : List[Any] , lowerCAmelCase__ : int=5_0400 , lowerCAmelCase__ : Dict=2048 , lowerCAmelCase__ : Optional[int]=2048 , lowerCAmelCase__ : Union[str, Any]=4096 , lowerCAmelCase__ : Optional[int]=28 , lowerCAmelCase__ : str=16 , lowerCAmelCase__ : Union[str, Any]=64 , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Tuple="gelu_new" , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : Any=1E-5 , lowerCAmelCase__ : Optional[int]=0.02 , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : Any=5_0256 , lowerCAmelCase__ : int=5_0256 , lowerCAmelCase__ : Union[str, Any]=False , **lowerCAmelCase__ : str , ):
SCREAMING_SNAKE_CASE_: Optional[int] = vocab_size
SCREAMING_SNAKE_CASE_: List[str] = n_ctx
SCREAMING_SNAKE_CASE_: List[Any] = n_positions
SCREAMING_SNAKE_CASE_: List[str] = n_embd
SCREAMING_SNAKE_CASE_: Optional[int] = n_layer
SCREAMING_SNAKE_CASE_: Optional[Any] = n_head
SCREAMING_SNAKE_CASE_: Union[str, Any] = n_inner
SCREAMING_SNAKE_CASE_: List[str] = rotary_dim
SCREAMING_SNAKE_CASE_: Dict = activation_function
SCREAMING_SNAKE_CASE_: Dict = resid_pdrop
SCREAMING_SNAKE_CASE_: List[str] = embd_pdrop
SCREAMING_SNAKE_CASE_: List[Any] = attn_pdrop
SCREAMING_SNAKE_CASE_: int = layer_norm_epsilon
SCREAMING_SNAKE_CASE_: str = initializer_range
SCREAMING_SNAKE_CASE_: List[Any] = use_cache
SCREAMING_SNAKE_CASE_: Any = bos_token_id
SCREAMING_SNAKE_CASE_: Optional[Any] = eos_token_id
super().__init__(
bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , **lowerCAmelCase__)
class __lowercase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : PretrainedConfig , lowerCAmelCase__ : str = "default" , lowerCAmelCase__ : List[PatchingSpec] = None , lowerCAmelCase__ : bool = False , ):
super().__init__(lowerCAmelCase__ , task=lowerCAmelCase__ , patching_specs=lowerCAmelCase__ , use_past=lowerCAmelCase__)
if not getattr(self._config , "pad_token_id" , lowerCAmelCase__):
# TODO: how to do that better?
SCREAMING_SNAKE_CASE_: Optional[int] = 0
@property
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_: Tuple = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="inputs")
SCREAMING_SNAKE_CASE_: Tuple = {0: "batch", 1: "past_sequence + sequence"}
else:
SCREAMING_SNAKE_CASE_: int = {0: "batch", 1: "sequence"}
return common_inputs
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
return self._config.n_layer
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
return self._config.n_head
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : int = -1 , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : Optional[TensorType] = None , ):
SCREAMING_SNAKE_CASE_: str = super(lowerCAmelCase__ , self).generate_dummy_inputs(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , seq_length=lowerCAmelCase__ , is_pair=lowerCAmelCase__ , framework=lowerCAmelCase__)
# We need to order the input in the way they appears in the forward()
SCREAMING_SNAKE_CASE_: str = OrderedDict({"input_ids": common_inputs["input_ids"]})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
else:
import torch
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE_: Union[str, Any] = seqlen + 2
SCREAMING_SNAKE_CASE_: Optional[Any] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
SCREAMING_SNAKE_CASE_: Tuple = [
(torch.zeros(lowerCAmelCase__), torch.zeros(lowerCAmelCase__)) for _ in range(self.num_layers)
]
SCREAMING_SNAKE_CASE_: Dict = common_inputs["attention_mask"]
if self.use_past:
SCREAMING_SNAKE_CASE_: Union[str, Any] = ordered_inputs["attention_mask"].dtype
SCREAMING_SNAKE_CASE_: List[str] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCAmelCase__ , lowerCAmelCase__ , dtype=lowerCAmelCase__)] , dim=1)
return ordered_inputs
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
return 13
| 13 |
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
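    # Worked example (illustrative values): the value/weight ratios are 6, 5
    # and 4, so items are taken greedily in that order; the third one only
    # fits fractionally.
    print(fractional_knapsack([60, 100, 120], [10, 20, 30], 50))
    # -> (240.0, [1, 1, 0.6666666666666666])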
| 11 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Union[str, Any] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict=7 , __UpperCAmelCase : Optional[Any]=3 , __UpperCAmelCase : Union[str, Any]=18 , __UpperCAmelCase : Any=30 , __UpperCAmelCase : Optional[int]=400 , __UpperCAmelCase : List[Any]=True , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : Dict=True , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : str=True , __UpperCAmelCase : Any=[0.5, 0.5, 0.5] , __UpperCAmelCase : Union[str, Any]=[0.5, 0.5, 0.5] , ) ->int:
"""simple docstring"""
a = size if size is not None else {'''shortest_edge''': 18}
a = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
a = parent
a = batch_size
a = num_channels
a = image_size
a = min_resolution
a = max_resolution
a = do_resize
a = size
a = do_center_crop
a = crop_size
a = do_normalize
a = image_mean
a = image_std
def __lowerCAmelCase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase_ ( lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = LevitImageProcessor if is_vision_available() else None
def __lowerCAmelCase ( self : Optional[int] ) ->str:
"""simple docstring"""
a = LevitImageProcessingTester(self )
@property
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCAmelCase ( self : Optional[Any] ) ->Optional[int]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
def __lowerCAmelCase ( self : List[str] ) ->Union[str, Any]:
"""simple docstring"""
a = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 18} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
a = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Dict ) ->Tuple:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self : List[str] ) ->Optional[Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def __lowerCAmelCase ( self : List[str] ) ->List[Any]:
"""simple docstring"""
a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
a = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
a = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
a = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 26 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
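# For contrast with the BFS differences noted above, a minimal BFS variant
# over the same graph representation (a sketch added for illustration, not
# part of the original module): swap the stack for a FIFO queue and mark
# nodes when they are enqueued.
from collections import deque


def breadth_first_search_sketch(graph: dict, start: str) -> set[str]:
    explored, queue = {start}, deque([start])
    while queue:
        v = queue.popleft()  # pop first element instead of last
        for adj in graph[v]:
            if adj not in explored:
                explored.add(adj)  # mark when enqueued, then keep exploring
                queue.append(adj)
    return explored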
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 26 | 1 |
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
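    # Worked example: "karolin" and "kathrin" differ at three positions.
    print(hamming_distance("karolin", "kathrin"))  # -> 3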
| 277 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
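def _matrix_demo() -> None:
    # Short usage sketch for the Matrix class above (helper added for
    # illustration; not part of the original module).
    m = Matrix([[1, 2], [3, 4]])
    assert m.determinant() == -2
    assert m * m.identity() == m  # the identity acts as a multiplicative unit
    assert (m + m) == m * 2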
if __name__ == "__main__":
import doctest
doctest.testmod()
| 65 | 0 |
"""simple docstring"""
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
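    # Worked example (grid chosen for illustration): the cheapest monotone
    # path 1 -> 3 -> 1 -> 1 -> 1 costs 7.
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # -> 7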
| 312 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCamelCase = None
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__UpperCamelCase = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
__UpperCamelCase = {
'''moussaKam/mbarthez''': 1024,
'''moussaKam/barthez''': 1024,
'''moussaKam/barthez-orangesum-title''': 1024,
}
__UpperCamelCase = '''▁'''
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"]
SCREAMING_SNAKE_CASE_ = BarthezTokenizer
def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token
super().__init__(
lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, )
snake_case_ = vocab_file
snake_case_ = False if not self.vocab_file else True
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case_ = [self.cls_token_id]
snake_case_ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]:
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.')
if not os.path.isdir(lowerCAmelCase__):
logger.error(f'Vocabulary path ({save_directory}) should be a directory')
return
snake_case_ = os.path.join(
lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__):
copyfile(self.vocab_file, lowerCAmelCase__)
return (out_vocab_file,)
| 312 | 1 |
from math import ceil, sqrt
def solution(limit: int = 1_000_000) -> int:
    """
    Count the hollow square laminae that can be formed using up to `limit` tiles.
    """
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer
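def _brute_force_check(limit: int = 100) -> int:
    # Brute-force cross-check for small limits (helper added for illustration):
    # count laminae directly as outer**2 - inner**2 <= limit, where the hole
    # keeps the same parity as the outer square and is at least one tile wide.
    count = 0
    for outer in range(3, limit + 1):
        for inner in range(outer - 2, 0, -2):
            if outer * outer - inner * inner > limit:
                break
            count += 1
    return count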
if __name__ == "__main__":
print(F"{solution() = }")
| 207 |
from math import asin, atan, cos, radians, sin, sqrt, tan
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Great-circle distance in metres between two (latitude, longitude) points
    given in degrees, on a sphere of radius RADIUS.
    """
    # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)
if __name__ == "__main__":
import doctest
doctest.testmod()
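    # Worked example (coordinates approximate): San Francisco to New York is
    # roughly 4,100 km along the great circle.
    SAN_FRANCISCO = (37.774856, -122.424227)
    NEW_YORK = (40.713019, -74.012647)
    print(f"{haversine_distance(*SAN_FRANCISCO, *NEW_YORK) / 1000:.1f} km")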
| 207 | 1 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
_lowerCAmelCase : Optional[Any] = """src/transformers"""
_lowerCAmelCase : List[str] = """docs/source/en/tasks"""
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Find the text in `filename` between two prompts, skipping empty lines at
    the boundaries.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    """
    Return the list of models supporting a given task, formatted as doc links.
    """
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """
    Check (and optionally update) the auto-generated model list in a task guide.
    """
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 368 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 298 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 116 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    """Creates an optimizer with a learning rate schedule using a warmup phase followed by a polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
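
# Minimal usage sketch (added; the hyper-parameter values are illustrative, not from this file):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=3e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
#     )
#     model.compile(optimizer=optimizer)  # assumes `model` is an existing tf.keras.Model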
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay, applied to the variables directly rather than through the
    loss (see "Decoupled Weight Decay Regularization", https://arxiv.org/abs/1711.05101)."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """
    Gradient accumulation utility. When used with a distribution strategy, the accumulator should be called in a
    replica context. Gradients will be accumulated locally on each replica and without synchronization. Users should
    then call `.gradients`, scale the gradients if required, and pass the result to `apply_gradients`.
    """

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
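
# Minimal usage sketch (added; names like `micro_batches` and `compute_loss` are hypothetical
# stand-ins): accumulate gradients over several micro-batches, then apply them once and reset.
#
#     accumulator = GradientAccumulator()
#     for micro_batch in micro_batches:
#         with tf.GradientTape() as tape:
#             loss = compute_loss(model, micro_batch)
#         accumulator(tape.gradient(loss, model.trainable_variables))
#     optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#     accumulator.reset()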
| 116 | 1 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask(tf.keras.layers.Layer):
    """Adaptive softmax output layer: the vocabulary is split into a head (frequent tokens plus
    cluster logits) and progressively smaller-dimensional tail clusters defined by `cutoffs`."""

    def __init__(self, vocab_size, d_embed, d_proj, cutoffs, div_val=1, keep_order=False, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order

        self.out_layers = []
        self.out_projs = []

    def build(self, input_shape):
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed), initializer="zeros", trainable=True, name="cluster_weight"
            )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,), initializer="zeros", trainable=True, name="cluster_bias"
            )

        if self.div_val == 1:
            for i in range(len(self.cutoffs)):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj),
                        initializer="zeros",
                        trainable=True,
                        name=f"out_projs_._{i}",
                    )
                    self.out_projs.append(weight)
                else:
                    self.out_projs.append(None)
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(self.vocab_size,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)

                weight = self.add_weight(
                    shape=(d_emb_i, self.d_proj), initializer="zeros", trainable=True, name=f"out_projs_._{i}"
                )
                self.out_projs.append(weight)
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._weight",
                )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,),
                    initializer="zeros",
                    trainable=True,
                    name=f"out_layers_._{i}_._bias",
                )
                self.out_layers.append((weight, bias))
        super().build(input_shape)

    @staticmethod
    def _logit(x, W, b, proj=None):
        y = x
        if proj is not None:
            y = tf.einsum("ibd,ed->ibe", y, proj)
        return tf.einsum("ibd,nd->ibn", y, W) + b

    @staticmethod
    def _gather_logprob(logprob, target):
        lp_size = shape_list(logprob)
        r = tf.range(lp_size[0], dtype=target.dtype)
        idx = tf.stack([r, target], 1)
        return tf.gather_nd(logprob, idx)

    def call(self, hidden, target, return_mean=True, training=False):
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden, self.out_layers[0][0], self.out_layers[0][1], self.out_projs[0])
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target, logits=output)
            out = tf.nn.log_softmax(output, axis=-1)
        else:
            hidden_sizes = shape_list(hidden)
            out = []
            loss = tf.zeros(hidden_sizes[:2])
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask)
                    cur_target = tf.boolean_mask(target, mask) - l_idx

                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]

                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight], 0)
                    cur_b = tf.concat([cur_b, self.cluster_bias], 0)

                    head_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[0])
                    head_logprob = tf.nn.log_softmax(head_logit)
                    out.append(head_logprob[..., : self.cutoffs[0]])
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_head_logprob, cur_target)
                else:
                    tail_logit = self._logit(hidden, cur_W, cur_b, self.out_projs[i])
                    tail_logprob = tf.nn.log_softmax(tail_logit)

                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i)
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob, mask)
                        cur_tail_logprob = tf.boolean_mask(tail_logprob, mask)
                        cur_logprob = self._gather_logprob(cur_tail_logprob, cur_target)
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx, -cur_logprob, shape_list(loss))
            out = tf.concat(out, axis=-1)

        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss)
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss)

            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss, name=self.name, aggregation="mean" if return_mean else "")

        return out
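
# Usage sketch (added; the cutoffs are the WikiText-103 settings used by Transformer-XL,
# everything else is illustrative). `hidden` is expected to be [seq_len, batch, d_proj]
# and `target` [seq_len, batch] of integer token ids.
#
#     softmax = TFAdaptiveSoftmaxMask(
#         vocab_size=267735, d_embed=1024, d_proj=1024, cutoffs=[20000, 40000, 200000], div_val=4
#     )
#     logprobs = softmax(hidden, target, return_mean=True, training=True)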
| 351 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_lowerCamelCase = mocked_dataloaders # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(predictions=predictions, references=references)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
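
# Typical invocations (added; flags are illustrative -- see the accelerate examples README
# linked above for the full instructions):
#
#     python tracking.py --with_tracking
#     accelerate launch tracking.py --with_tracking --mixed_precision fp16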
| 67 | 0 |
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
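
# Cross-check with the standard library (added for illustration, not part of the
# original script): itertools.combinations yields the same 5-choose-3 subsets.
#
#     >>> import itertools
#     >>> list(itertools.combinations([10, 20, 30, 40, 50], 3))[0]
#     (10, 20, 30)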
| 26 |
def harmonic_series(n_term: str) -> list:
    """Generate the harmonic series up to the n-th term, as strings.

    >>> harmonic_series(3)
    ['1', '1/2', '1/3']
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
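
# Companion sketch (added, not part of the original script): summing the same series
# exactly with fractions.Fraction to get the n-th harmonic number.
from fractions import Fraction


def harmonic_sum(n: int) -> Fraction:
    # H_n = 1 + 1/2 + ... + 1/n as an exact rational, e.g. harmonic_sum(4) == Fraction(25, 12)
    return sum((Fraction(1, k) for k in range(1, n + 1)), Fraction(0))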
| 26 | 1 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {"image": load_image(image), "question": question},
            {"image": image, "question": question},
            {"image": image, "question": question, "word_boxes": word_boxes},
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )

        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
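
# Minimal usage sketch outside the test harness (added; model name taken from the tests
# above, requires network access and pytesseract):
#
#     dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
#     dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=1)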
| 126 | """simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None


@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    """Apply a function to iterable elements in parallel, using multiprocessing.Pool or a joblib backend."""
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)


def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")

    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")

    return mapped


def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )


@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    """Context manager that selects the joblib backend used by `parallel_map` inside the block."""
    ParallelBackendConfig.backend_name = backend_name

    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

        # TODO: call create_cache_and_write_probe if "download" in steps
        # TODO: raise NotImplementedError when Dataset.map etc is called

    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
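
# Usage sketch (added; requires the optional joblib-spark backend):
#
#     with parallel_backend("spark"):
#         ...  # map-style calls inside this block are dispatched through joblib/Spark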
| 126 | 1 |
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    """
    Find the path from the top left to the bottom right of the matrix with the
    lowest possible sum, moving only right or down, and return that sum.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod() | 312 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_mgp_str': ['MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MgpstrConfig'],
    'processing_mgp_str': ['MgpstrProcessor'],
    'tokenization_mgp_str': ['MgpstrTokenizer'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mgp_str'] = [
        'MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MgpstrModel',
        'MgpstrPreTrainedModel',
        'MgpstrForSceneTextRecognition',
    ]

if TYPE_CHECKING:
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 312 | 1 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = "relu" ) -> Optional[Any]:
super().__init__()
UpperCAmelCase_ : int = nn.Convad(
_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,kernel_size=_SCREAMING_SNAKE_CASE ,stride=_SCREAMING_SNAKE_CASE ,padding=kernel_size // 2 ,bias=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = nn.BatchNormad(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = ACTaFN[activation] if activation is not None else nn.Identity()
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Tensor:
UpperCAmelCase_ : Any = self.convolution(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = self.normalization(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = self.activation(_SCREAMING_SNAKE_CASE )
return hidden_state
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Any:
super().__init__()
UpperCAmelCase_ : Any = ResNetConvLayer(
config.num_channels ,config.embedding_size ,kernel_size=7 ,stride=2 ,activation=config.hidden_act )
        UpperCAmelCase_ : int = nn.MaxPool2d(kernel_size=3 ,stride=2 ,padding=1 )
UpperCAmelCase_ : List[str] = config.num_channels
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Tensor:
UpperCAmelCase_ : Union[str, Any] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
                '''Make sure that the channel dimension of the pixel values matches the one set in the configuration.''' )
UpperCAmelCase_ : Dict = self.embedder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.pooler(_SCREAMING_SNAKE_CASE )
return embedding
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 2 ) -> List[str]:
super().__init__()
        UpperCAmelCase_ : Union[str, Any] = nn.Conv2d(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,kernel_size=1 ,stride=_SCREAMING_SNAKE_CASE ,bias=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Dict = nn.BatchNorm2d(_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Tensor:
UpperCAmelCase_ : List[Any] = self.convolution(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = self.normalization(_SCREAMING_SNAKE_CASE )
return hidden_state
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = "relu" ) -> List[str]:
super().__init__()
UpperCAmelCase_ : int = in_channels != out_channels or stride != 1
UpperCAmelCase_ : List[Any] = (
ResNetShortCut(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,stride=_SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase_ : List[Any] = nn.Sequential(
ResNetConvLayer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,stride=_SCREAMING_SNAKE_CASE ) ,ResNetConvLayer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,activation=_SCREAMING_SNAKE_CASE ) ,)
        UpperCAmelCase_ : str = ACT2FN[activation]
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : List[str] = hidden_state
UpperCAmelCase_ : List[str] = self.layer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.shortcut(_SCREAMING_SNAKE_CASE )
hidden_state += residual
UpperCAmelCase_ : Tuple = self.activation(_SCREAMING_SNAKE_CASE )
return hidden_state
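# --- Illustrative sketch of the residual contract in the basic layer above:
# act(layer(x) + shortcut(x)) only works if both branches agree on shape, which
# is why the 1x1 shortcut is applied whenever channels or stride change.
# Hedged: the 64->128 / stride-2 sizes below are arbitrary example values.
import torch
from torch import nn

x = torch.randn(1, 64, 56, 56)
main = nn.Sequential(nn.Conv2d(64, 128, 3, stride=2, padding=1), nn.BatchNorm2d(128))
shortcut = nn.Sequential(nn.Conv2d(64, 128, 1, stride=2), nn.BatchNorm2d(128))
out = torch.relu(main(x) + shortcut(x))  # residual add: shapes must match
assert out.shape == (1, 128, 28, 28)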
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = "relu" ,_SCREAMING_SNAKE_CASE = 4 ) -> Optional[int]:
super().__init__()
UpperCAmelCase_ : Union[str, Any] = in_channels != out_channels or stride != 1
UpperCAmelCase_ : Optional[int] = out_channels // reduction
UpperCAmelCase_ : Union[str, Any] = (
ResNetShortCut(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,stride=_SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
UpperCAmelCase_ : str = nn.Sequential(
ResNetConvLayer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,kernel_size=1 ) ,ResNetConvLayer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,stride=_SCREAMING_SNAKE_CASE ) ,ResNetConvLayer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,kernel_size=1 ,activation=_SCREAMING_SNAKE_CASE ) ,)
        UpperCAmelCase_ : Optional[Any] = ACT2FN[activation]
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
UpperCAmelCase_ : Optional[int] = hidden_state
UpperCAmelCase_ : List[str] = self.layer(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.shortcut(_SCREAMING_SNAKE_CASE )
hidden_state += residual
UpperCAmelCase_ : Any = self.activation(_SCREAMING_SNAKE_CASE )
return hidden_state
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = 2 ,_SCREAMING_SNAKE_CASE = 2 ,) -> List[str]:
super().__init__()
UpperCAmelCase_ : Optional[Any] = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
UpperCAmelCase_ : List[Any] = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,stride=_SCREAMING_SNAKE_CASE ,activation=config.hidden_act ) ,*[layer(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,activation=config.hidden_act ) for _ in range(depth - 1 )] ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Tensor:
UpperCAmelCase_ : List[str] = input
for layer in self.layers:
UpperCAmelCase_ : Union[str, Any] = layer(_SCREAMING_SNAKE_CASE )
return hidden_state
class __a( nn.Module ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
super().__init__()
UpperCAmelCase_ : Tuple = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
_SCREAMING_SNAKE_CASE ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,) )
UpperCAmelCase_ : Tuple = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_SCREAMING_SNAKE_CASE ,config.depths[1:] ):
self.stages.append(ResNetStage(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,depth=_SCREAMING_SNAKE_CASE ) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = True ) -> BaseModelOutputWithNoAttention:
UpperCAmelCase_ : Union[str, Any] = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
UpperCAmelCase_ : Any = hidden_states + (hidden_state,)
UpperCAmelCase_ : int = stage_module(_SCREAMING_SNAKE_CASE )
if output_hidden_states:
UpperCAmelCase_ : str = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE ,hidden_states=_SCREAMING_SNAKE_CASE ,)
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = ResNetConfig
lowerCAmelCase = '''resnet'''
lowerCAmelCase = '''pixel_values'''
lowerCAmelCase = True
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> str:
        if isinstance(_SCREAMING_SNAKE_CASE ,nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight ,mode='''fan_out''' ,nonlinearity='''relu''' )
        elif isinstance(_SCREAMING_SNAKE_CASE ,(nn.BatchNorm2d, nn.GroupNorm) ):
nn.init.constant_(module.weight ,1 )
nn.init.constant_(module.bias ,0 )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Tuple:
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Any = value
__a = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__a = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'''The bare ResNet model outputting raw features without any specific head on top.''' , _a , )
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> List[Any]:
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = config
UpperCAmelCase_ : List[Any] = ResNetEmbeddings(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = ResNetEncoder(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Dict = nn.AdaptiveAvgPool2d((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_SCREAMING_SNAKE_CASE ,config_class=_CONFIG_FOR_DOC ,modality='''vision''' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ) -> BaseModelOutputWithPoolingAndNoAttention:
UpperCAmelCase_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : List[str] = self.embedder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.encoder(
_SCREAMING_SNAKE_CASE ,output_hidden_states=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = encoder_outputs[0]
UpperCAmelCase_ : Tuple = self.pooler(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_SCREAMING_SNAKE_CASE ,pooler_output=_SCREAMING_SNAKE_CASE ,hidden_states=encoder_outputs.hidden_states ,)
@add_start_docstrings(
'''
ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _a , )
class __a( _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> List[str]:
super().__init__(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = config.num_labels
UpperCAmelCase_ : Any = ResNetModel(_SCREAMING_SNAKE_CASE )
# classification head
UpperCAmelCase_ : List[str] = nn.Sequential(
nn.Flatten() ,nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity() ,)
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_SCREAMING_SNAKE_CASE ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ,) -> ImageClassifierOutputWithNoAttention:
UpperCAmelCase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : Dict = self.resnet(_SCREAMING_SNAKE_CASE ,output_hidden_states=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = outputs.pooler_output if return_dict else outputs[1]
UpperCAmelCase_ : int = self.classifier(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCAmelCase_ : int = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCAmelCase_ : int = '''single_label_classification'''
else:
UpperCAmelCase_ : str = '''multi_label_classification'''
if self.config.problem_type == "regression":
UpperCAmelCase_ : Any = MSELoss()
if self.num_labels == 1:
UpperCAmelCase_ : int = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
UpperCAmelCase_ : Any = loss_fct(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
elif self.config.problem_type == "single_label_classification":
UpperCAmelCase_ : Tuple = CrossEntropyLoss()
UpperCAmelCase_ : Optional[Any] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCAmelCase_ : Tuple = BCEWithLogitsLoss()
UpperCAmelCase_ : Union[str, Any] = loss_fct(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if not return_dict:
UpperCAmelCase_ : str = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_SCREAMING_SNAKE_CASE ,logits=_SCREAMING_SNAKE_CASE ,hidden_states=outputs.hidden_states )
@add_start_docstrings(
'''
ResNet backbone, to be used with frameworks like DETR and MaskFormer.
''' , _a , )
class __a( _a , _a ):
"""simple docstring"""
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> Any:
super().__init__(_SCREAMING_SNAKE_CASE )
super()._init_backbone(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = [config.embedding_size] + config.hidden_sizes
UpperCAmelCase_ : str = ResNetEmbeddings(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = ResNetEncoder(_SCREAMING_SNAKE_CASE )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE )
@replace_return_docstrings(output_type=_SCREAMING_SNAKE_CASE ,config_class=_CONFIG_FOR_DOC )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = None ,_SCREAMING_SNAKE_CASE = None ) -> BackboneOutput:
UpperCAmelCase_ : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
UpperCAmelCase_ : str = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCAmelCase_ : Optional[Any] = self.embedder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.encoder(_SCREAMING_SNAKE_CASE ,output_hidden_states=_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = outputs.hidden_states
UpperCAmelCase_ : Dict = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
UpperCAmelCase_ : List[Any] = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
            feature_maps=_SCREAMING_SNAKE_CASE ,hidden_states=outputs.hidden_states if output_hidden_states else None ,attentions=_SCREAMING_SNAKE_CASE ,)
| 235 |
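# --- Illustrative usage sketch for the ResNet modeling code above. Hedged:
# assumes the public `transformers` API and the `microsoft/resnet-50` checkpoint
# named in the docstrings; the random tensor stands in for a processed image.
import torch
from transformers import ResNetModel

model = ResNetModel.from_pretrained("microsoft/resnet-50")
pixel_values = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    outputs = model(pixel_values)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2048, 7, 7])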
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = SwinConfig()
UpperCAmelCase_ : Dict = swin_name.split('''_''' )
UpperCAmelCase_ : List[Any] = name_split[1]
UpperCAmelCase_ : Optional[Any] = int(name_split[4] )
UpperCAmelCase_ : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
UpperCAmelCase_ : Tuple = 96
UpperCAmelCase_ : Optional[int] = (2, 2, 6, 2)
UpperCAmelCase_ : str = (3, 6, 12, 24)
elif model_size == "small":
UpperCAmelCase_ : Dict = 96
UpperCAmelCase_ : str = (2, 2, 18, 2)
UpperCAmelCase_ : List[str] = (3, 6, 12, 24)
elif model_size == "base":
UpperCAmelCase_ : Tuple = 128
UpperCAmelCase_ : Optional[int] = (2, 2, 18, 2)
UpperCAmelCase_ : Optional[int] = (4, 8, 16, 32)
else:
UpperCAmelCase_ : List[str] = 192
UpperCAmelCase_ : str = (2, 2, 18, 2)
UpperCAmelCase_ : Union[str, Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
UpperCAmelCase_ : int = 21841
else:
UpperCAmelCase_ : Tuple = 1000
UpperCAmelCase_ : Any = '''huggingface/label-files'''
UpperCAmelCase_ : int = '''imagenet-1k-id2label.json'''
UpperCAmelCase_ : str = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
    UpperCAmelCase_ : Optional[int] = {int(k): v for k, v in id2label.items()}
    UpperCAmelCase_ : List[Any] = id2label
    UpperCAmelCase_ : int = {v: k for k, v in id2label.items()}
UpperCAmelCase_ : Tuple = img_size
UpperCAmelCase_ : List[Any] = num_classes
UpperCAmelCase_ : str = embed_dim
UpperCAmelCase_ : Optional[int] = depths
UpperCAmelCase_ : Dict = num_heads
UpperCAmelCase_ : Optional[int] = window_size
return config
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if "patch_embed.proj" in name:
UpperCAmelCase_ : str = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
UpperCAmelCase_ : List[Any] = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
UpperCAmelCase_ : Tuple = '''encoder.''' + name
if "attn.proj" in name:
UpperCAmelCase_ : Tuple = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase_ : int = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase_ : List[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase_ : Tuple = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase_ : Optional[int] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "norm.weight":
UpperCAmelCase_ : Optional[Any] = '''layernorm.weight'''
if name == "norm.bias":
UpperCAmelCase_ : List[str] = '''layernorm.bias'''
if "head" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''head''' , '''classifier''' )
else:
UpperCAmelCase_ : Union[str, Any] = '''swin.''' + name
return name
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : str = orig_state_dict.pop(_lowercase )
if "mask" in key:
continue
elif "qkv" in key:
UpperCAmelCase_ : str = key.split('''.''' )
UpperCAmelCase_ : str = int(key_split[1] )
UpperCAmelCase_ : Optional[Any] = int(key_split[3] )
UpperCAmelCase_ : Optional[int] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ : Any = val[:dim, :]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[str] = val[-dim:, :]
else:
UpperCAmelCase_ : Any = val[
:dim
]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2
]
UpperCAmelCase_ : Union[str, Any] = val[
-dim:
]
else:
UpperCAmelCase_ : Optional[Any] = val
return orig_state_dict
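# --- Illustrative sketch of the fused-QKV split in the state-dict conversion
# above: timm stores the attention projections as one (3*dim, dim) weight that
# is sliced into query/key/value. Toy dimensions below, not a real checkpoint.
import torch

dim = 4
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
q = qkv_weight[:dim, :]
k = qkv_weight[dim : dim * 2, :]
v = qkv_weight[-dim:, :]
assert q.shape == k.shape == v.shape == (dim, dim)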
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = timm.create_model(_lowercase , pretrained=_lowercase )
timm_model.eval()
UpperCAmelCase_ : Any = get_swin_config(_lowercase )
UpperCAmelCase_ : Union[str, Any] = SwinForImageClassification(_lowercase )
model.eval()
UpperCAmelCase_ : Tuple = convert_state_dict(timm_model.state_dict() , _lowercase )
model.load_state_dict(_lowercase )
UpperCAmelCase_ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase_ : int = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swin_name.replace('''_''' , '''-''' ) ) )
UpperCAmelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
UpperCAmelCase_ : Union[str, Any] = image_processor(images=_lowercase , return_tensors='''pt''' )
UpperCAmelCase_ : Optional[Any] = timm_model(inputs['''pixel_values'''] )
UpperCAmelCase_ : Dict = model(**_lowercase ).logits
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
print(f'''Saving model {swin_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_lowercase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__a = parser.parse_args()
    convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 235 | 1 |
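# --- Illustrative invocation of the Swin conversion script above. Hedged: the
# flag names come from the argparse definitions; the script filename and the
# output directory are assumptions made for the example.
#
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-converted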
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class _lowerCAmelCase :
_lowercase =XGLMConfig
_lowercase ={}
_lowercase ='''gelu'''
def __init__( self , _UpperCamelCase , _UpperCamelCase=14 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=32 , _UpperCamelCase=2 , _UpperCamelCase=4 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=0.02 , ) -> Optional[Any]:
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = seq_length
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_input_mask
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = vocab_size
lowerCAmelCase_ = d_model
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = ffn_dim
lowerCAmelCase_ = activation_function
lowerCAmelCase_ = activation_dropout
lowerCAmelCase_ = attention_dropout
lowerCAmelCase_ = max_position_embeddings
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = None
lowerCAmelCase_ = 0
lowerCAmelCase_ = 2
lowerCAmelCase_ = 1
def __a ( self ) -> str:
return XGLMConfig.from_pretrained("facebook/xglm-564M" )
def __a ( self ) -> Any:
lowerCAmelCase_ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowerCAmelCase_ = None
if self.use_input_mask:
lowerCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ = self.get_config()
lowerCAmelCase_ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __a ( self ) -> Dict:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=_UpperCAmelCase , )
def __a ( self ) -> str:
lowerCAmelCase_ = self.prepare_config_and_inputs()
(
lowerCAmelCase_
) = config_and_inputs
lowerCAmelCase_ = {
"input_ids": input_ids,
"head_mask": head_mask,
}
return config, inputs_dict
@require_tf
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
_lowercase =(TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_lowercase =(TFXGLMForCausalLM,) if is_tf_available() else ()
_lowercase =(
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
_lowercase =False
_lowercase =False
_lowercase =False
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = TFXGLMModelTester(self )
lowerCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , n_embd=37 )
def __a ( self ) -> Optional[int]:
self.config_tester.run_common_tests()
@slow
def __a ( self ) -> Optional[int]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = TFXGLMModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor." )
def __a ( self ) -> Optional[int]:
super().test_resize_token_embeddings()
@require_tf
class _lowerCAmelCase ( unittest.TestCase ):
@slow
def __a ( self , _UpperCamelCase=True ) -> Dict:
lowerCAmelCase_ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
        lowerCAmelCase_ = tf.convert_to_tensor([[2, 268, 9_865]] , dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCAmelCase_ = [2, 268, 9_865, 67, 11, 1_988, 57_252, 9_865, 5, 984, 67, 1_988, 213_838, 1_658, 53, 70_446, 33, 6_657, 278, 1_581]
# fmt: on
lowerCAmelCase_ = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase )
@slow
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
lowerCAmelCase_ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
tf.random.set_seed(0 )
lowerCAmelCase_ = tokenizer("Today is a nice day and" , return_tensors="tf" )
lowerCAmelCase_ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(":/CPU:0" ):
lowerCAmelCase_ = model.generate(_UpperCAmelCase , do_sample=_UpperCAmelCase , seed=[7, 0] )
lowerCAmelCase_ = tokenizer.decode(output_ids[0] , skip_special_tokens=_UpperCAmelCase )
lowerCAmelCase_ = (
"Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@slow
def __a ( self ) -> Dict:
lowerCAmelCase_ = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M" )
lowerCAmelCase_ = XGLMTokenizer.from_pretrained("facebook/xglm-564M" )
lowerCAmelCase_ = "left"
# use different length sentences to test batching
lowerCAmelCase_ = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When",
"Hello, my dog is a little",
]
lowerCAmelCase_ = tokenizer(_UpperCAmelCase , return_tensors="tf" , padding=_UpperCAmelCase )
lowerCAmelCase_ = inputs["input_ids"]
lowerCAmelCase_ = model.generate(input_ids=_UpperCAmelCase , attention_mask=inputs["attention_mask"] , max_new_tokens=12 )
lowerCAmelCase_ = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
lowerCAmelCase_ = model.generate(input_ids=_UpperCAmelCase , max_new_tokens=12 )
lowerCAmelCase_ = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
lowerCAmelCase_ = model.generate(input_ids=_UpperCAmelCase , max_new_tokens=12 )
lowerCAmelCase_ = tokenizer.batch_decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
lowerCAmelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_UpperCAmelCase )
lowerCAmelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=_UpperCAmelCase )
lowerCAmelCase_ = [
"This is an extremelly long sentence that only exists to test the ability of the model to cope with "
"left-padding, such as in batched generation. The output for the sequence below should be the same "
"regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
"a single",
"Hello, my dog is a little bit of a shy one, but he is very friendly",
]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [non_padded_sentence, padded_sentence] )
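# --- Illustrative note on the batched-generation test above: decoder-only
# models continue from the rightmost position, so shorter prompts are padded on
# the LEFT to keep the real tokens adjacent to where generation starts. Minimal
# sketch of that layout (token ids are made up):
pad_id = 0
prompts = [[5, 6, 7, 8], [9, 10]]
width = max(len(p) for p in prompts)
left_padded = [[pad_id] * (width - len(p)) + p for p in prompts]
assert left_padded == [[5, 6, 7, 8], [0, 0, 9, 10]]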
| 231 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A = ["image_processor", "tokenizer"]
A = "OwlViTImageProcessor"
A = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase ) -> str:
__UpperCamelCase : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , _UpperCAmelCase , )
__UpperCamelCase : str = kwargs.pop("feature_extractor" )
__UpperCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(_UpperCAmelCase , _UpperCAmelCase )
def __call__(self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="max_length" , _UpperCAmelCase="np" , **_UpperCAmelCase ) -> str:
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ) or (isinstance(_UpperCAmelCase , _UpperCAmelCase ) and not isinstance(text[0] , _UpperCAmelCase )):
__UpperCamelCase : Tuple = [self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )]
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(text[0] , _UpperCAmelCase ):
__UpperCamelCase : List[str] = []
# Maximum number of queries across batch
__UpperCamelCase : List[str] = max([len(_UpperCAmelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_UpperCAmelCase ) != max_num_queries:
__UpperCamelCase : Any = t + [" "] * (max_num_queries - len(_UpperCAmelCase ))
__UpperCamelCase : int = self.tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
encodings.append(_UpperCAmelCase )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
__UpperCamelCase : List[str] = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : int = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__UpperCamelCase : Tuple = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__UpperCamelCase : Any = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
__UpperCamelCase : List[Any] = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__UpperCamelCase : Any = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
__UpperCamelCase : Optional[Any] = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
__UpperCamelCase : Optional[Any] = BatchEncoding()
__UpperCamelCase : Union[str, Any] = input_ids
__UpperCamelCase : List[str] = attention_mask
if query_images is not None:
__UpperCamelCase : str = BatchEncoding()
__UpperCamelCase : Any = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase ).pixel_values
__UpperCamelCase : List[Any] = query_pixel_values
if images is not None:
__UpperCamelCase : Dict = self.image_processor(_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
if text is not None and images is not None:
__UpperCamelCase : Optional[Any] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__UpperCamelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_UpperCAmelCase ) , tensor_type=_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> List[str]:
return self.image_processor.post_process_object_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Optional[int]:
return self.image_processor.post_process_image_guided_detection(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*_UpperCAmelCase , **_UpperCAmelCase )
def a_ (self , *_UpperCAmelCase , **_UpperCAmelCase ) -> int:
return self.tokenizer.decode(*_UpperCAmelCase , **_UpperCAmelCase )
@property
def a_ (self ) -> Tuple:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , _UpperCAmelCase , )
return self.image_processor_class
@property
def a_ (self ) -> Union[str, Any]:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , _UpperCAmelCase , )
return self.image_processor
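# --- Illustrative usage sketch for the processor above. Hedged: assumes the
# public `transformers` API and the `google/owlvit-base-patch32` checkpoint;
# the blank image and query strings are arbitrary examples.
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.new("RGB", (640, 480))
texts = [["a photo of a cat", "a photo of a dog"]]  # one query list per image
inputs = processor(text=texts, images=image, return_tensors="pt")
print(inputs["input_ids"].shape, inputs["pixel_values"].shape)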
| 298 | 0 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Tuple ,lowercase_ : List[str] ,lowercase_ : Any=1_3 ,lowercase_ : str=7 ,lowercase_ : Optional[Any]=True ,lowercase_ : List[str]=True ,lowercase_ : Optional[Any]=True ,lowercase_ : int=True ,lowercase_ : int=True ,lowercase_ : str=False ,lowercase_ : Tuple=False ,lowercase_ : str=False ,lowercase_ : Optional[Any]=2 ,lowercase_ : Optional[int]=9_9 ,lowercase_ : str=0 ,lowercase_ : Any=3_2 ,lowercase_ : Any=5 ,lowercase_ : List[Any]=4 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : Dict=5_1_2 ,lowercase_ : Union[str, Any]=2 ,lowercase_ : Optional[Any]=0.02 ,lowercase_ : List[str]=2 ,lowercase_ : Dict=4 ,lowercase_ : int="last" ,lowercase_ : Tuple=True ,lowercase_ : Union[str, Any]=None ,lowercase_ : str=0 ,):
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[Any] = batch_size
lowerCAmelCase__ : List[Any] = seq_length
lowerCAmelCase__ : Union[str, Any] = is_training
lowerCAmelCase__ : Tuple = use_input_lengths
lowerCAmelCase__ : Optional[int] = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : Tuple = gelu_activation
lowerCAmelCase__ : Union[str, Any] = sinusoidal_embeddings
lowerCAmelCase__ : List[str] = causal
lowerCAmelCase__ : List[Any] = asm
lowerCAmelCase__ : int = n_langs
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : Optional[Any] = n_special
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : List[str] = num_hidden_layers
lowerCAmelCase__ : Optional[Any] = num_attention_heads
lowerCAmelCase__ : List[str] = hidden_dropout_prob
lowerCAmelCase__ : str = attention_probs_dropout_prob
lowerCAmelCase__ : str = max_position_embeddings
lowerCAmelCase__ : Optional[Any] = type_sequence_label_size
lowerCAmelCase__ : Tuple = initializer_range
lowerCAmelCase__ : Union[str, Any] = num_labels
lowerCAmelCase__ : Union[str, Any] = num_choices
lowerCAmelCase__ : Optional[Any] = summary_type
lowerCAmelCase__ : List[Any] = use_proj
lowerCAmelCase__ : List[str] = scope
lowerCAmelCase__ : Optional[int] = bos_token_id
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
lowerCAmelCase__ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_input_lengths:
lowerCAmelCase__ : Optional[Any] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCAmelCase__ : int = None
if self.use_token_type_ids:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size] ,2 ).float()
lowerCAmelCase__ : str = ids_tensor([self.batch_size] ,self.num_choices )
lowerCAmelCase__ : Tuple = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCAmelCase ( self : List[str] ):
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def __lowerCAmelCase ( self : int ,lowercase_ : int ,lowercase_ : List[Any] ,lowercase_ : Optional[Any] ,lowercase_ : str ,lowercase_ : Dict ,lowercase_ : Tuple ,lowercase_ : Dict ,lowercase_ : List[Any] ,lowercase_ : Union[str, Any] ,):
lowerCAmelCase__ : str = XLMModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : List[str] = model(lowercase_ ,lengths=lowercase_ ,langs=lowercase_ )
lowerCAmelCase__ : Optional[Any] = model(lowercase_ ,langs=lowercase_ )
lowerCAmelCase__ : Tuple = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Any ,lowercase_ : Optional[int] ,lowercase_ : str ,lowercase_ : Any ,lowercase_ : Optional[int] ,lowercase_ : List[Any] ,lowercase_ : Optional[int] ,lowercase_ : Any ,lowercase_ : Dict ,):
lowerCAmelCase__ : Tuple = XLMWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : List[str] = model(lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Any ,lowercase_ : Optional[int] ,lowercase_ : Union[str, Any] ,lowercase_ : str ,lowercase_ : Dict ,lowercase_ : Dict ,lowercase_ : int ,lowercase_ : Optional[Any] ,lowercase_ : Union[str, Any] ,):
lowerCAmelCase__ : Dict = XLMForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : List[str] = model(lowercase_ )
lowerCAmelCase__ : List[Any] = model(lowercase_ ,start_positions=lowercase_ ,end_positions=lowercase_ )
lowerCAmelCase__ : Optional[Any] = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Any ,lowercase_ : Optional[int] ,lowercase_ : Tuple ,lowercase_ : Optional[Any] ,lowercase_ : Dict ,lowercase_ : List[Any] ,lowercase_ : Tuple ,lowercase_ : Any ,lowercase_ : str ,lowercase_ : int ,):
lowerCAmelCase__ : str = XLMForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : str = model(lowercase_ )
lowerCAmelCase__ : int = model(
lowercase_ ,start_positions=lowercase_ ,end_positions=lowercase_ ,cls_index=lowercase_ ,is_impossible=lowercase_ ,p_mask=lowercase_ ,)
lowerCAmelCase__ : Dict = model(
lowercase_ ,start_positions=lowercase_ ,end_positions=lowercase_ ,cls_index=lowercase_ ,is_impossible=lowercase_ ,)
((lowerCAmelCase__) ,) : Optional[int] = result_with_labels.to_tuple()
lowerCAmelCase__ : Tuple = model(lowercase_ ,start_positions=lowercase_ ,end_positions=lowercase_ )
((lowerCAmelCase__) ,) : Dict = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Optional[int] ,lowercase_ : Dict ,lowercase_ : int ,lowercase_ : Any ,lowercase_ : Tuple ,lowercase_ : int ,lowercase_ : Union[str, Any] ,lowercase_ : int ,lowercase_ : Optional[int] ,):
lowerCAmelCase__ : int = XLMForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Dict = model(lowercase_ )
lowerCAmelCase__ : Dict = model(lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Any ,lowercase_ : List[str] ,lowercase_ : List[Any] ,lowercase_ : Tuple ,lowercase_ : Union[str, Any] ,lowercase_ : str ,lowercase_ : str ,lowercase_ : List[str] ,lowercase_ : str ,):
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : int = XLMForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : Optional[int] = model(lowercase_ ,attention_mask=lowercase_ ,labels=lowercase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : str ,lowercase_ : str ,lowercase_ : Optional[int] ,lowercase_ : int ,lowercase_ : Tuple ,lowercase_ : List[Any] ,lowercase_ : List[Any] ,lowercase_ : Optional[Any] ,lowercase_ : Dict ,lowercase_ : Optional[Any] ,):
lowerCAmelCase__ : List[Any] = self.num_choices
lowerCAmelCase__ : Union[str, Any] = XLMForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowerCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
lowerCAmelCase__ : str = model(
lowercase_ ,attention_mask=lowercase_ ,token_type_ids=lowercase_ ,labels=lowercase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) : Any = config_and_inputs
lowerCAmelCase__ : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase__ = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Union[str, Any] ,lowercase_ : Tuple ,lowercase_ : int ,lowercase_ : List[Any] ,lowercase_ : List[str] ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : Any ,lowercase_ : Tuple ,lowercase_ : Optional[Any]=False ):
lowerCAmelCase__ : Tuple = super()._prepare_for_class(lowercase_ ,lowercase_ ,return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowerCAmelCase__ : List[str] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase_ )
lowerCAmelCase__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=lowercase_ )
return inputs_dict
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Union[str, Any] = XLMModelTester(self )
lowerCAmelCase__ : Optional[int] = ConfigTester(self ,config_class=lowercase_ ,emb_dim=3_7 )
def __lowerCAmelCase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowercase_ )
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowercase_ )
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowercase_ )
def __lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowercase_ )
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowercase_ )
def __lowerCAmelCase ( self : List[str] ):
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowercase_ )
def __lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowercase_ )
def __lowerCAmelCase ( self : str ,lowercase_ : Optional[Any] ,lowercase_ : Tuple ,lowercase_ : Tuple ,lowercase_ : Union[str, Any] ,lowercase_ : Dict ,lowercase_ : Union[str, Any]=False ,lowercase_ : Union[str, Any]=1 ):
self.assertIsInstance(lowercase_ ,lowercase_ )
self.assertListEqual(
[isinstance(lowercase_ ,lowercase_ ) for iter_attentions in attentions] ,[True] * len(lowercase_ ) )
self.assertEqual(len(lowercase_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowercase_ ):
# adds PAD dummy token
lowerCAmelCase__ : Any = min_length + idx + 1
lowerCAmelCase__ : Optional[int] = min_length + idx + 1
lowerCAmelCase__ : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(lowercase_ ) )
def __lowerCAmelCase ( self : Optional[int] ,lowercase_ : Any ,lowercase_ : Optional[Any] ,lowercase_ : Dict ,lowercase_ : Union[str, Any] ,lowercase_ : Dict ,lowercase_ : Union[str, Any]=False ,lowercase_ : Any=1 ):
self.assertIsInstance(lowercase_ ,lowercase_ )
self.assertListEqual(
[isinstance(lowercase_ ,lowercase_ ) for iter_hidden_states in hidden_states] ,[True] * len(lowercase_ ) ,)
self.assertEqual(len(lowercase_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowercase_ ):
# adds PAD dummy token
lowerCAmelCase__ : Any = min_length + idx + 1
lowerCAmelCase__ : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(lowercase_ ) ,)
pass
@slow
def __lowerCAmelCase ( self : Dict ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : str = XLMModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowerCAmelCase ( self : Optional[int] ):
lowerCAmelCase__ : str = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(lowercase_ )
lowerCAmelCase__ : int = torch.tensor([[1_4, 4_4_7]] ,dtype=torch.long ,device=lowercase_ ) # the president
lowerCAmelCase__ : List[str] = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowerCAmelCase__ : List[str] = model.generate(lowercase_ ,do_sample=lowercase_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,lowercase_ )
| 74 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = '''▁'''
__UpperCamelCase : Union[str, Any] = {'''vocab_file''': '''spiece.model'''}
__UpperCamelCase : Tuple = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
__UpperCamelCase : Optional[Any] = {
'''google/reformer-crime-and-punishment''': 5_2_4_2_8_8,
}
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = VOCAB_FILES_NAMES
lowercase__ = PRETRAINED_VOCAB_FILES_MAP
lowercase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ = ["input_ids", "attention_mask"]
def __init__( self : List[Any] ,lowercase_ : List[str] ,lowercase_ : Optional[int]="</s>" ,lowercase_ : List[Any]="<unk>" ,lowercase_ : Optional[Any]=[] ,lowercase_ : Optional[Dict[str, Any]] = None ,**lowercase_ : int ,):
lowerCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=lowercase_ ,unk_token=lowercase_ ,additional_special_tokens=lowercase_ ,sp_model_kwargs=self.sp_model_kwargs ,**lowercase_ ,)
lowerCAmelCase__ : List[str] = vocab_file
lowerCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowercase_ )
@property
def __lowerCAmelCase ( self : List[str] ):
return self.sp_model.get_piece_size()
def __lowerCAmelCase ( self : Any ):
lowerCAmelCase__ : Optional[Any] = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
lowerCAmelCase__ : str = self.__dict__.copy()
lowerCAmelCase__ : Any = None
return state
def __setstate__( self : List[str] ,lowercase_ : Any ):
lowerCAmelCase__ : Union[str, Any] = d
# for backward compatibility
if not hasattr(self ,'''sp_model_kwargs''' ):
lowerCAmelCase__ : Tuple = {}
lowerCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Dict ,lowercase_ : str ):
return self.sp_model.encode(lowercase_ ,out_type=lowercase_ )
def __lowerCAmelCase ( self : List[Any] ,lowercase_ : int ):
return self.sp_model.piece_to_id(lowercase_ )
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : Dict ):
if index < self.sp_model.get_piece_size():
lowerCAmelCase__ : List[Any] = self.sp_model.IdToPiece(lowercase_ )
return token
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : List[Any] ):
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Optional[Any] = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
lowerCAmelCase__ : Dict = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def __lowerCAmelCase ( self : Optional[Any] ,lowercase_ : str ,lowercase_ : Optional[str] = None ):
if not os.path.isdir(lowercase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : List[Any] = os.path.join(
lowercase_ ,(filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ ,'''wb''' ) as fi:
lowerCAmelCase__ : Any = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
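# --- Illustrative round-trip with the tokenizer above. Hedged: assumes the
# `google/reformer-crime-and-punishment` checkpoint listed in the vocab map.
from transformers import ReformerTokenizer

tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
ids = tok("Crime and Punishment").input_ids
print(tok.decode(ids))  # should recover the text, modulo whitespace handling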
| 74 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__UpperCamelCase : str = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : int = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 182 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class a__ ( logging.LoggerAdapter ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[int] , a : str , *a : Optional[int] , **a : List[Any] ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
__lowerCamelCase = kwargs.pop('''main_process_only''' , a )
__lowerCamelCase = kwargs.pop('''in_order''' , a )
if self.isEnabledFor(a ):
if self._should_log(a ):
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
elif in_order:
__lowerCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
state.wait_for_everyone()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
if log_level is None:
__lowerCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , UpperCamelCase__ )
__lowerCamelCase = logging.getLogger(UpperCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCamelCase__ , {} )
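# --- Illustrative usage sketch for the multi-process logger above. Hedged:
# mirrors the documented `accelerate.logging` behavior; the shared state must
# be initialized (e.g. via Accelerator()) before the adapter will log.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("emitted once, on the main process only")
logger.info("emitted by every rank, in order", main_process_only=False, in_order=True)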
| 67 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default='cifar10', metadata={'help': 'Name of a dataset from the datasets package'}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={'help': "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the training data.'})
    validation_dir: Optional[str] = field(default=None, metadata={'help': 'A folder containing the validation data.'})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={'help': 'Percent to split off of train for validation.'}
    )
    mask_patch_size: int = field(default=32, metadata={'help': 'The size of the square patches to use for masking.'})
    mask_ratio: float = field(
        default=0.6, metadata={'help': 'Percentage of patches to mask.'}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files['train'] = self.train_dir
        if self.validation_dir is not None:
            data_files['validation'] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a '
                'checkpoint identifier on the hub. '
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'},
    )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}
    )
    image_processor_name: str = field(default=None, metadata={'help': 'Name or path of preprocessor config.'})
    use_auth_token: bool = field(
        default=False,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.'
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            'help': (
                'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.'
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None, metadata={'help': 'Stride to use for the encoder.'}
    )
class MaskGenerator:
    """
    Generates a random boolean mask over model patches, SimMIM-style.
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size')
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size')

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    mask = torch.stack([example['mask'] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mim', model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Initialize our dataset.
    ds = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']
# Create config
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config_kwargs = {
'''cache_dir''': model_args.cache_dir,
'''revision''': model_args.model_revision,
'''use_auth_token''': True if model_args.use_auth_token else None,
}
if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
# make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, 'decoder_type'):
        config.decoder_type = 'simmim'
# adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
)
config.update(
{
'''image_size''': model_args.image_size,
'''patch_size''': model_args.patch_size,
'''encoder_stride''': model_args.encoder_stride,
} )
# create image processor
if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()
# create model
if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
else:
logger.info('''Training new model from scratch''' )
        model = AutoModelForMaskedImageModeling.from_config(config)
    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names
    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = 'image'
    elif "img" in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]
# transformations as done in original SimMIM paper
# source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
[
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
# create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size, mask_patch_size=data_args.mask_patch_size, model_patch_size=model_args.patch_size, mask_ratio=data_args.mask_ratio, )
    def preprocess_images(examples):
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        examples['mask'] = [mask_generator() for i in range(len(examples[image_column_name]))]
        return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
        ds["train"].set_transform(preprocess_images)
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
# Set the validation transforms
        ds["validation"].set_transform(preprocess_images)
# Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=ds['train'] if training_args.do_train else None, eval_dataset=ds['validation'] if training_args.do_eval else None, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
trainer.log_metrics('''train''' , train_result.metrics )
trainer.save_metrics('''train''' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
# Write model card and (optionally) push to hub
    kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''masked-image-modeling''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''masked-image-modeling'''],
}
if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main() | 235 |
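A quick standalone check of the MaskGenerator defined in the script above (reuses that class; the arithmetic follows directly from its constructor): with a 192px input, 32px mask patches, 4px model patches and a 0.6 ratio, there are (192/32)^2 = 36 maskable patches, ceil(36 * 0.6) = 22 of them are masked, and each expands to an 8x8 block of model patches.

gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
mask = gen()
assert mask.shape == ((192 // 4) ** 2,)   # one entry per 4px model patch
assert mask.sum().item() == 22 * 8 * 8    # 22 masked 32px patches, each an 8x8 block of model patches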
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split('_')

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "attn.proj" in name:
        name = name.replace('attn.proj', 'attention.output.dense')
    if "attn" in name:
        name = name.replace('attn', 'attention.self')
    if "norm1" in name:
        name = name.replace('norm1', 'layernorm_before')
    if "norm2" in name:
        name = name.replace('norm2', 'layernorm_after')
    if "mlp.fc1" in name:
        name = name.replace('mlp.fc1', 'intermediate.dense')
    if "mlp.fc2" in name:
        name = name.replace('mlp.fc2', 'output.dense')

    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'

    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'swin.' + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight'
                ] = val[:dim, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight'
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight'
                ] = val[-dim:, :]
            else:
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias'] = val[:dim]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias'] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'

    image_processor = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_', '-')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    timm_outs = timm_model(inputs['pixel_values'])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(f'Saving model {swin_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)

    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swin_name',
default='swin_tiny_patch4_window7_224',
type=str,
help='Name of the Swin timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path) | 235 | 1 |
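The only non-trivial step in convert_state_dict above is splitting timm's fused qkv projection into separate query/key/value tensors; the rows are stacked in q, k, v order, so the split is three row blocks. A minimal illustration of that slicing:

import torch

dim = 4  # per-projection output size (all_head_size in the script above)
qkv_weight = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)

query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]

assert torch.equal(torch.cat([query, key, value]), qkv_weight)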
"""simple docstring"""
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
""".aiff""",
""".au""",
""".avr""",
""".caf""",
""".flac""",
""".htk""",
""".svx""",
""".mat4""",
""".mat5""",
""".mpc2k""",
""".ogg""",
""".paf""",
""".pvf""",
""".raw""",
""".rf64""",
""".sd2""",
""".sds""",
""".ircam""",
""".voc""",
""".w64""",
""".wav""",
""".nist""",
""".wavex""",
""".wve""",
""".xi""",
""".mp3""",
""".opus""",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS | 126 |
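For reference, the builder above is normally reached through `load_dataset` rather than instantiated directly; a sketch (the directory path is a placeholder, and the label column assumes the one-subfolder-per-class layout):

from datasets import load_dataset

ds = load_dataset("audiofolder", data_dir="/path/to/audio/folder")
sample = ds["train"][0]
print(sample["audio"])   # decoded audio: {"path": ..., "array": ..., "sampling_rate": ...}
print(sample["label"])   # class label inferred from the subfolder name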
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/')
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
) | 126 | 1 |
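A direct-call sketch of the consolidate() function above (model identifiers and the destination path are illustrative; the unset tokenizer and config arguments resolve to defaults as in the function body):

from pathlib import Path

dest_dir = Path("./rag-consolidated")
dest_dir.mkdir(exist_ok=True)
consolidate(
    "rag_sequence",
    "facebook/bart-large",
    "facebook/dpr-question_encoder-single-nq-base",
    dest_dir,
)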
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1_024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1_024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1E-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'], num_layers=predefined_args['num_layers'], units=predefined_args['units'], hidden_size=predefined_args['hidden_size'], max_length=predefined_args['max_length'], num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'], dropout=predefined_args['dropout'], output_attention=False, output_all_encodings=False, use_residual=predefined_args['use_residual'], activation=predefined_args.get('activation', 'gelu'), layer_norm_eps=predefined_args.get('layer_norm_eps', None), )
    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
    # Specify download folder to Gluonnlp's vocab
    data_dir = os.path.join(get_home_dir(), 'models')
    vocab = _load_vocab(vocab_name, None, data_dir, cls=Vocab)
    original_bort = nlp.model.BERTModel(
        encoder, len(vocab), units=predefined_args['units'], embed_size=predefined_args['embed_size'], embed_dropout=predefined_args['embed_dropout'], word_embed=predefined_args['word_embed'], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args['token_type_vocab_size'], use_classifier=False, use_decoder=False, )
    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, 'word_embed.0.weight')
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, 'encoder.position_weight')
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, 'encoder.layer_norm.beta')
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, 'encoder.layer_norm.gamma')
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data)
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias')
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight')
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias')
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight')
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias')
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight')
        # self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f'encoder.transformer_cells.{i}.proj.bias')
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f'encoder.transformer_cells.{i}.proj.weight')
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.layer_norm.beta')
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.layer_norm.gamma')
        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_1.bias')
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_1.weight')
        # output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f'encoder.transformer_cells.{i}.ffn.ffn_2.bias')
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f'encoder.transformer_cells.{i}.ffn.ffn_2.weight')
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f'encoder.transformer_cells.{i}.ffn.layer_norm.beta')
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma')
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors='pt')
    output_hf = hf_bort_model(**input_ids)[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1E-3)
    if success:
        print("✔️ Both models output the same tensors")
    else:
        print("❌ The models do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 351 |
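The conversion above leans on one invariant: every Gluon array must match the shape of the HF parameter it replaces, which is exactly what check_and_map_params asserts. A stripped-down version of that guard, independent of mxnet (numpy stands in for the Gluon arrays; copy_checked is an illustrative helper, not part of the script above):

import numpy as np
import torch
from torch import nn

def copy_checked(hf_param: nn.Parameter, source: np.ndarray) -> nn.Parameter:
    # refuse silently-wrong conversions: shapes must agree exactly
    assert tuple(hf_param.shape) == source.shape, (
        f"source has shape {source.shape}, but the target expects {tuple(hf_param.shape)}"
    )
    return nn.Parameter(torch.from_numpy(source).float())

layer = nn.Linear(4, 3)
layer.weight = copy_checked(layer.weight, np.ones((3, 4), dtype=np.float32))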
"""simple docstring"""
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS:
    at each index the element is either skipped or taken.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["A", "B", "C"])
generate_all_subsequences(seq)
| 202 | 0 |
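The recursion above makes a binary take-or-skip decision per element, so it enumerates all 2**n subsequences. An equivalent iterative sketch using bitmasks, collecting the results instead of printing them:

def all_subsequences(sequence):
    n = len(sequence)
    return [[sequence[i] for i in range(n) if mask & (1 << i)] for mask in range(1 << n)]

assert len(all_subsequences([3, 1, 2, 4])) == 2**4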
class MaxFenwickTree:
    """
    Maximum Fenwick Tree: point updates with range-maximum queries.
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 235 |
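A quick usage sketch for the MaxFenwickTree above; note that query() treats its right bound as exclusive:

tree = MaxFenwickTree(8)
tree.update(2, 5)
tree.update(5, 9)
print(tree.query(0, 6))  # max over indices 0..5 -> 9
print(tree.query(0, 3))  # max over indices 0..2 -> 5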
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class GlueDataset(Dataset):
    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, cache_dir=None):
        warnings.warn(
            'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
            'library. You can have a look at this example script for pointers: '
            'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py', FutureWarning, )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name')
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""", )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    F"""Loading features from cached file {cached_features_file} [took %.3f s]""", time.time() - start)
            else:
                logger.info(F"""Creating features from dataset file at {args.data_dir}""")
                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples, tokenizer, max_length=args.max_seq_length, label_list=label_list, output_mode=self.output_mode, )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 235 | 1 |
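The cache logic in __init__ above generalizes to a reusable pattern: compute once, cache to disk, and guard with a file lock so only one process in a distributed job does the work. A minimal standalone sketch of that pattern (the cache file path is illustrative):

import os

import torch
from filelock import FileLock

def cached_compute(cache_file, compute_fn, overwrite=False):
    with FileLock(cache_file + ".lock"):
        if os.path.exists(cache_file) and not overwrite:
            return torch.load(cache_file)
        result = compute_fn()
        torch.save(result, cache_file)
        return result

features = cached_compute("/tmp/features.pt", lambda: [i * i for i in range(10)])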
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike] | 196 |
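These aliases are meant to be used in signatures; an illustrative function (not part of the module above) showing how NestedDataStructureLike parameterizes over the leaf type:

def flatten(data: NestedDataStructureLike[int]) -> List[int]:
    if isinstance(data, dict):
        return [x for value in data.values() for x in flatten(value)]
    if isinstance(data, (list, tuple)):
        return [x for item in data for x in flatten(item)]
    return [data]

print(flatten({"a": [1, 2], "b": 3}))  # [1, 2, 3]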
from ..utils import DummyObject, requires_backends
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : int , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> int:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Union[str, Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Any:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> Dict:
requires_backends(__lowerCAmelCase , ["torch"] )
def SCREAMING_SNAKE_CASE ( *__lowerCAmelCase , **__lowerCAmelCase ) -> List[Any]:
requires_backends(__lowerCAmelCase , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : str , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a(metaclass=DummyObject):
    _backends = ["torch"]
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Any = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : str , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : str = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Union[str, Any] = ['''torch''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Any , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Union[str, Any] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : str , *SCREAMING_SNAKE_CASE : Any , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[Any] = ['''torch''']
def __init__( self : Tuple , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Optional[int] = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : int , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : List[str] = ['''torch''']
def __init__( self : int , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : List[str] , *SCREAMING_SNAKE_CASE : List[str] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : List[Any] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Tuple = ['''torch''']
def __init__( self : Any , *SCREAMING_SNAKE_CASE : str , **SCREAMING_SNAKE_CASE : Any ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Tuple , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[int] , *SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
class __a ( metaclass=A__ ):
_lowerCAmelCase : Dict = ['''torch''']
def __init__( self : Optional[int] , *SCREAMING_SNAKE_CASE : List[Any] , **SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def __lowercase ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def __lowercase ( cls : Dict , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
requires_backends(cls , ["torch"] ) | 196 | 1 |
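# Behavior sketch (not part of the original file): in an environment without torch,
# touching any placeholder above raises an informative ImportError instead of the
# package import itself failing. (Nothing is raised when torch is installed.)
try:
    __a()  # any of the placeholder classes defined above
except ImportError as err:
    print(err)  # the message explains that `torch` must be installed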
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformers implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used; remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path) | 74 |
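# Usage sketch (not part of the original script): reloading a converted checkpoint,
# assuming the converter above was run with --pytorch_dump_folder_path ./converted
# (a hypothetical output directory).
from transformers import AutoTokenizer, RobertaPreLayerNormForMaskedLM

converted_model = RobertaPreLayerNormForMaskedLM.from_pretrained("./converted")
converted_tokenizer = AutoTokenizer.from_pretrained("./converted")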
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists()) | 74 | 1 |
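# Minimal standalone sketch (not part of the original test file) of the API the tests
# above exercise; the model id and settings mirror those used in the tests.
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)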
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 352 |
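# Usage sketch (not part of the original module), assuming access to the
# "albert-base-v2" checkpoint referenced in the maps above.
from transformers import AlbertTokenizerFast

albert_tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
encoded = albert_tokenizer("A quick usage check.", return_tensors="pt")
print(encoded["input_ids"].shape, encoded["token_type_ids"].shape)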
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b


def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 16 | 0 |
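# Usage sketch (not part of the original file): wiring the synthetic dataset and model
# defined above into a tiny training loop; the hyperparameters are arbitrary.
dataset = RegressionDataset(length=64, seed=42)
loader = DataLoader(dataset, batch_size=16, shuffle=True)
model = RegressionModel()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
for batch in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(batch["x"]), batch["y"])
    loss.backward()
    optimizer.step()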
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/pegasus-xsum''': 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file=None, tokenizer_file=None, pad_token="<pad>", eos_token="</s>", unk_token="<unk>",
        mask_token="<mask_2>", mask_token_sent="<mask_1>", additional_special_tokens=None, offset=103, **kwargs,
    ):
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, pad_token=pad_token, eos_token=eos_token,
            unk_token=unk_token, mask_token=mask_token, mask_token_sent=mask_token_sent, offset=offset,
            additional_special_tokens=additional_special_tokens, **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 235 |
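# Usage sketch (not part of the original module), assuming access to the
# "google/pegasus-xsum" checkpoint referenced above.
from transformers import PegasusTokenizerFast

pegasus_tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = pegasus_tokenizer("PEGASUS is pre-trained with gap sentences.")["input_ids"]
print(ids[-1] == pegasus_tokenizer.eos_token_id)  # build_inputs_with_special_tokens appends </s>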
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
logger = logging.get_logger(__name__)


@dataclass
class GlueDataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys())})
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )

    def __post_init__(self):
        self.task_name = self.task_name.lower()


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class GlueDataset(Dataset):
    """This will be superseded by a framework-agnostic approach soon."""

    args: GlueDataTrainingArguments
    output_mode: str
    features: List[InputFeatures]

    def __init__(
        self,
        args: GlueDataTrainingArguments,
        tokenizer: PreTrainedTokenizerBase,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        cache_dir: Optional[str] = None,
    ):
        warnings.warn(
            "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
            "library. You can have a look at this example script for pointers: "
            "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
            FutureWarning,
        )
        self.args = args
        self.processor = glue_processors[args.task_name]()
        self.output_mode = glue_output_modes[args.task_name]
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}",
        )
        label_list = self.processor.get_labels()
        if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
            "RobertaTokenizer",
            "RobertaTokenizerFast",
            "XLMRobertaTokenizer",
            "BartTokenizer",
            "BartTokenizerFast",
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.features = torch.load(cached_features_file)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
            else:
                logger.info(f"Creating features from dataset file at {args.data_dir}")

                if mode == Split.dev:
                    examples = self.processor.get_dev_examples(args.data_dir)
                elif mode == Split.test:
                    examples = self.processor.get_test_examples(args.data_dir)
                else:
                    examples = self.processor.get_train_examples(args.data_dir)
                if limit_length is not None:
                    examples = examples[:limit_length]
                self.features = glue_convert_examples_to_features(
                    examples,
                    tokenizer,
                    max_length=args.max_seq_length,
                    label_list=label_list,
                    output_mode=self.output_mode,
                )
                start = time.time()
                torch.save(self.features, cached_features_file)
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
| 235 | 1 |
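# Usage sketch (not part of the original file): constructing the dataset above; the
# data directory is a hypothetical path containing the MRPC tsv files.
from transformers import AutoTokenizer

glue_args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
bert_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
train_dataset = GlueDataset(glue_args, tokenizer=bert_tokenizer, mode=Split.train)
print(len(train_dataset), train_dataset.get_labels())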
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_cpmant""": ["""CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CpmAntConfig"""],
"""tokenization_cpmant""": ["""CpmAntTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
"""CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CpmAntForCausalLM""",
"""CpmAntModel""",
"""CpmAntPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 352 |
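# Usage sketch (not part of the original file): with the lazy module installed in
# sys.modules, symbols are resolved on first attribute access rather than at import time.
from transformers import CpmAntConfig

cpmant_config = CpmAntConfig()
print(cpmant_config.model_type)  # "cpmant"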
import torch
from diffusers import StableDiffusionPipeline
lowerCAmelCase : Any = """path-to-your-trained-model"""
lowerCAmelCase : int = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("""cuda""")
lowerCAmelCase : Union[str, Any] = """A photo of sks dog in a bucket"""
lowerCAmelCase : Any = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("""dog-bucket.png""")
| 127 | 0 |
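# Variation sketch (not part of the original snippet): passing a seeded torch.Generator
# makes the sampled image reproducible across runs.
generator = torch.Generator("cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("dog-bucket-seeded.png")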
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 63 |
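# Related sketch (not part of the original file): for pure rotations, the same effect
# can be had with cv2.getRotationMatrix2D instead of hand-picked affine point pairs.
import cv2
import numpy as np

img = np.zeros((200, 200), dtype=np.uint8)  # stand-in image
center = (img.shape[1] // 2, img.shape[0] // 2)
matrix = cv2.getRotationMatrix2D(center, angle=30, scale=1.0)
rotated = cv2.warpAffine(img, matrix, (img.shape[1], img.shape[0]))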
"""simple docstring"""
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path: str, config_path: str, output_path: str):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 202 | 0 |
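# Usage sketch (not part of the original script): sampling from a converted pipeline;
# "./ldm-converted" is a hypothetical directory passed as --output_path above.
from diffusers import LDMPipeline

ldm_pipeline = LDMPipeline.from_pretrained("./ldm-converted")
sample = ldm_pipeline(num_inference_steps=50).images[0]
sample.save("ldm_sample.png")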
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "distilbert-base-uncased": 512,
    "distilbert-base-uncased-distilled-squad": 512,
    "distilbert-base-cased": 512,
    "distilbert-base-cased-distilled-squad": 512,
    "distilbert-base-german-cased": 512,
    "distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
    "distilbert-base-uncased": {"do_lower_case": True},
    "distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
    "distilbert-base-cased": {"do_lower_case": False},
    "distilbert-base-cased-distilled-squad": {"do_lower_case": False},
    "distilbert-base-german-cased": {"do_lower_case": False},
    "distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]",
        pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1 is not None:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 193 |
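# Usage sketch (not part of the original module), assuming access to the
# "distilbert-base-uncased" checkpoint referenced above. DistilBERT has no token-type
# embeddings, which is why model_input_names omits token_type_ids.
from transformers import DistilBertTokenizerFast

distilbert_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
print(distilbert_tokenizer("Hello world!").keys())  # dict_keys(['input_ids', 'attention_mask'])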
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        # fundamental transformation applied to every pixel value
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 193 | 1 |
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def add_one(i):  # picklable for multiprocessing
    return i + 1


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def test_parallel_backend_input_dataset() -> None:
    with parallel_backend('spark'):
        assert ParallelBackendConfig.backend_name == "spark"

    lst = [1, 2, 3]
    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=2)

    with pytest.raises(ValueError):
        with parallel_backend('unsupported backend'):
            map_nested(add_one, lst, num_proc=-1)


@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc', [2, -1])
def test_parallel_backend_map_nested(num_proc) -> None:
    s1 = [1, 2]
    s2 = {'a': 1, 'b': 2}
    s3 = {'a': [1, 2], 'b': [3, 4]}
    s4 = {'a': {'1': 1}, 'b': 2}
    s5 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
    expected_map_nested_s1 = [2, 3]
    expected_map_nested_s2 = {'a': 2, 'b': 3}
    expected_map_nested_s3 = {'a': [2, 3], 'b': [4, 5]}
    expected_map_nested_s4 = {'a': {'1': 2}, 'b': 3}
    expected_map_nested_s5 = {'a': 2, 'b': 3, 'c': 4, 'd': 5}

    with parallel_backend('spark'):
        assert map_nested(add_one, s1, num_proc=num_proc) == expected_map_nested_s1
        assert map_nested(add_one, s2, num_proc=num_proc) == expected_map_nested_s2
        assert map_nested(add_one, s3, num_proc=num_proc) == expected_map_nested_s3
        assert map_nested(add_one, s4, num_proc=num_proc) == expected_map_nested_s4
        assert map_nested(add_one, s5, num_proc=num_proc) == expected_map_nested_s5
| 196 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/pegasus-large' )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[Any] = '</s>'
lowercase__: Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__: Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '</s>' )
self.assertEqual(vocab_keys[-1] , 'v' )
self.assertEqual(len(lowerCAmelCase__ ) , 1_103 )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
lowercase__: int = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__: Any = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
lowercase__: Union[str, Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
lowercase__: int = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Optional[int] = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
lowercase__: int = 'To ensure a smooth flow of bank resolutions.'
lowercase__: Any = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
lowercase__: str = tokenizer([raw_input_str] , return_tensors=lowerCAmelCase__ ).input_ids[0]
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Any = ['This is going to be way too long.' * 150, 'short example']
lowercase__: Tuple = ['not super long but more than 5 tokens', 'tiny']
lowercase__: Dict = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
lowercase__: Any = self._large_tokenizer(
text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask.
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
# fmt: off
lowercase__: List[str] = {'input_ids': [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='[MASK]')
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
def SCREAMING_SNAKE_CASE__ ( self , **lowerCAmelCase__ ) -> PegasusTokenizer:
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
return ("This is a test", "This is a test")
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
lowercase__: List[Any] = ['This is going to be way too long.' * 1_000, 'short example']
lowercase__: str = ['not super long but more than 5 tokens', 'tiny']
lowercase__: Tuple = self._large_tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
lowercase__: Dict = self._large_tokenizer(
text_target=lowerCAmelCase__ , max_length=5 , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='pt' )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(lowerCAmelCase__ ) == 2 # input_ids, attention_mask.
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: str = (
'This is an example string that is used to test the original TF implementation against the HF'
' implementation'
)
lowercase__: Optional[int] = self._large_tokenizer(lowerCAmelCase__ ).input_ids
self.assertListEqual(
lowerCAmelCase__ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
| 196 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCAmelCase__ : List[str] = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
UpperCAmelCase__ : Dict = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
UpperCAmelCase__ : Dict = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels) -> float:
    return float((preds == labels).mean())


def acc_and_f1(preds, labels) -> dict:
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs) -> float:
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
"""simple docstring"""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
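# Minimal sanity check for the helpers above (an illustrative addition; the
# expected values are computed by hand, not taken from the IndicGLUE test suite):
if __name__ == "__main__":
    _preds = np.array([0, 1, 1, 0])
    _labels = np.array([0, 1, 0, 0])
    assert simple_accuracy(_preds, _labels) == 0.75
    print(acc_and_f1(_preds, _labels))  # {'accuracy': 0.75, 'f1': 0.666...}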
| 301 |
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True
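# Worked example (illustrative): is_safe(initial_grid, 0, 1, 6) is False because a 6
# already sits in row 0 of the initial grid above, while is_safe(initial_grid, 0, 1, 1)
# is True: no 1 appears in row 0, in column 1, or in the top-left 3x3 box.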
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 301 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        '''simple docstring'''
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        '''simple docstring'''
        s = f'Matrix consist of {self.row} rows and {self.column} columns\n'

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f'%{max_element_length}s'

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = '''['''
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
def __repr__( self):
'''simple docstring'''
return str(self)
    def validate_indicies(self, loc):
        '''simple docstring'''
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        '''simple docstring'''
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another):
        '''simple docstring'''
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        '''simple docstring'''
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        '''simple docstring'''
        return self + (-another)
    def __mul__(self, another):
        '''simple docstring'''
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f'Unsupported type given for another ({type(another)})'
            raise TypeError(msg)
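    # Worked example (illustrative): Matrix(2, 2, 1) * 2 yields a 2x2 matrix of 2s,
    # and Matrix(2, 3, 1) * Matrix(3, 2, 1) yields a 2x2 matrix of 3s, since each
    # entry sums three products of ones.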
    def transpose(self):
        '''simple docstring'''
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        '''simple docstring'''
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
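    # The identity used above (illustrative note): if self == A^(-1), then
    # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
    # which is exactly self - ((self * u) * (v_t * self)) / numerator_factor.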
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f'a^(-1) is {ainv}')
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f'u is {u}')
        print(f'v is {v}')
        print(f'uv^T is {u * v.transpose()}')
        # Sherman Morrison
        print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}')

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase_ = {
'configuration_speecht5': [
'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP',
'SpeechT5Config',
'SpeechT5HifiGanConfig',
],
'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'],
'processing_speecht5': ['SpeechT5Processor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['SpeechT5Tokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST',
'SpeechT5ForSpeechToText',
'SpeechT5ForSpeechToSpeech',
'SpeechT5ForTextToSpeech',
'SpeechT5Model',
'SpeechT5PreTrainedModel',
'SpeechT5HifiGan',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
logger = logging.getLogger(__name__)
class NERTransformer(BaseTransformer):
    mode = "token-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        module = import_module("""tasks""")
        try:
            token_classification_task_clazz = getattr(module, hparams.task_type)
            self.token_classification_task: TokenClassificationTask = token_classification_task_clazz()
        except AttributeError:
            raise ValueError(
                f"""Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. """
                f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""")
        self.labels = self.token_classification_task.get_labels(hparams.labels)
        self.pad_token_label_id = CrossEntropyLoss().ignore_index
        super().__init__(hparams, len(self.labels), self.mode)
def _lowerCamelCase ( self , **__lowerCAmelCase ):
return self.model(**__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCamelCase__ = self(**__lowerCAmelCase )
UpperCamelCase__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowerCamelCase ( self ):
UpperCamelCase__ = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase__ = self._feature_file(__lowerCAmelCase )
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase )
UpperCamelCase__ = torch.load(__lowerCAmelCase )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
UpperCamelCase__ = self.token_classification_task.read_examples_from_file(args.data_dir , __lowerCAmelCase )
UpperCamelCase__ = self.token_classification_task.convert_examples_to_features(
__lowerCAmelCase , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=__lowerCAmelCase , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , __lowerCAmelCase )
torch.save(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ):
UpperCamelCase__ = self._feature_file(__lowerCAmelCase )
logger.info("""Loading features from cached file %s""" , __lowerCAmelCase )
UpperCamelCase__ = torch.load(__lowerCAmelCase )
UpperCamelCase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase__ = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
UpperCamelCase__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) , batch_size=__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""Compute validation""" ""
UpperCamelCase__ = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
) # XLM and RoBERTa don"t use token_type_ids
UpperCamelCase__ = self(**__lowerCAmelCase )
UpperCamelCase__ , UpperCamelCase__ = outputs[:2]
UpperCamelCase__ = logits.detach().cpu().numpy()
UpperCamelCase__ = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
UpperCamelCase__ = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
UpperCamelCase__ = np.argmax(__lowerCAmelCase , axis=2 )
UpperCamelCase__ = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
UpperCamelCase__ = dict(enumerate(self.labels ) )
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase__ = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(__lowerCAmelCase , __lowerCAmelCase ),
"""precision""": precision_score(__lowerCAmelCase , __lowerCAmelCase ),
"""recall""": recall_score(__lowerCAmelCase , __lowerCAmelCase ),
"""f1""": fa_score(__lowerCAmelCase , __lowerCAmelCase ),
}
UpperCamelCase__ = dict(results.items() )
UpperCamelCase__ = results
return ret, preds_list, out_label_list
def _lowerCamelCase ( self , __lowerCAmelCase ):
# when stable
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(__lowerCAmelCase )
UpperCamelCase__ = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowerCamelCase ( self , __lowerCAmelCase ):
# updating to test_epoch_end instead of deprecated test_end
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(__lowerCAmelCase )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase__ = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowerCamelCase ( __lowerCAmelCase , __lowerCAmelCase ):
# Add NER specific options
BaseTransformer.add_model_specific_args(__lowerCAmelCase , __lowerCAmelCase )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=__lowerCAmelCase , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=128 , type=__lowerCAmelCase , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=__lowerCAmelCase , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=__lowerCAmelCase , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCamelCase__ = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCamelCase__ = parser.parse_args()
UpperCamelCase__ = NERTransformer(args)
UpperCamelCase__ = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCamelCase__ = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
UpperCamelCase__ = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 365 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])

        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["""aatype"""].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["""aatype"""].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["""aatype"""].device, )
    protein_aatype = protein["""aatype"""].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]

    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["""aatype"""].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein
def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    """simple docstring"""
    batch = tree_map(lambda n: torch.tensor(n, device=batch["""aatype"""].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
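# Minimal illustrative usage (an addition, not part of the original module; the
# eight-residue input below is a made-up example with every residue set to
# restype index 0):
if __name__ == "__main__":
    example = {"aatype": torch.zeros(8, dtype=torch.long)}
    example = make_atom14_masks(example)
    print(example["residx_atom14_to_atom37"].shape)  # torch.Size([8, 14])
    print(example["atom37_atom_exists"].shape)  # torch.Size([8, 37])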
| 87 | 0 |
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
A: Optional[int] = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
A: int = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
A: Any = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric('rouge')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n ['rouge1', 'rouge2', 'rougeL', 'rougeLsum']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        '''simple docstring'''
        if rouge_types is None:
            rouge_types = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]

        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []

        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)

        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]

        return result
| 109 |
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """simple docstring"""

    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f'''{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'''
                    f''' {positive_arg}={kwargs[positive_arg]}''')

        self.torchscript = kwargs.pop('''torchscript''', self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop('''torch_xla_tpu_print_metrics''', self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop('''fp16_opt_level''', self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'})
    fp16_opt_level: str = field(
        default='O1', metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        }, )
    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ['''torch'''])
        logger.info('''PyTorch: setting up devices''')
        if not self.cuda:
            device = torch.device('''cpu''')
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''')
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self) -> bool:
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ['''torch'''])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ['''torch'''])
        return self._setup_devices[0]

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ['''torch'''])
        return self._setup_devices[1]

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
| 127 | 0 |
"""simple docstring"""
def solution() -> int:
    return [
        a * b * (1_000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1_000 - a - b) ** 2)
    ][0]
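# Worked check (illustrative note): the only Pythagorean triplet with
# a + b + c = 1000 is (200, 375, 425), so solution() returns
# 200 * 375 * 425 = 31_875_000.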
if __name__ == "__main__":
print(f"""{solution() = }""")
| 354 |
"""simple docstring"""
def multiplication_table(number: int, number_of_terms: int) -> str:
    return "\n".join(
        f"""{number} * {i} = {number * i}""" for i in range(1, number_of_terms + 1))
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 233 | 0 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
def UpperCamelCase ( self ):
torch.manual_seed(0 )
A__ = UNetaDConditionModel(
block_out_channels=(32, 64),layers_per_block=2,sample_size=32,in_channels=4,out_channels=4,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''),up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''),cross_attention_dim=32,attention_head_dim=(2, 4),use_linear_projection=__lowerCamelCase,)
A__ = DDIMScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule='''scaled_linear''',clip_sample=__lowerCamelCase,set_alpha_to_one=__lowerCamelCase,)
A__ = DDIMInverseScheduler(
beta_start=0.00085,beta_end=0.012,beta_schedule='''scaled_linear''',clip_sample=__lowerCamelCase,set_alpha_to_zero=__lowerCamelCase,)
torch.manual_seed(0 )
A__ = AutoencoderKL(
block_out_channels=[32, 64],in_channels=3,out_channels=3,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],latent_channels=4,sample_size=128,)
torch.manual_seed(0 )
A__ = CLIPTextConfig(
bos_token_id=0,eos_token_id=2,hidden_size=32,intermediate_size=37,layer_norm_eps=1E-05,num_attention_heads=4,num_hidden_layers=5,pad_token_id=1,vocab_size=1000,hidden_act='''gelu''',projection_dim=512,)
A__ = CLIPTextModel(__lowerCamelCase )
A__ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
A__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=0 ):
A__ = floats_tensor((1, 16, 16),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
A__ = floats_tensor((1, 2, 4, 16, 16),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith('''mps''' ):
A__ = torch.manual_seed(__lowerCamelCase )
else:
A__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
A__ = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=0 ):
A__ = floats_tensor((1, 3, 32, 32),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
A__ = image.cpu().permute(0,2,3,1 )[0]
A__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' )
if str(__lowerCamelCase ).startswith('''mps''' ):
A__ = torch.manual_seed(__lowerCamelCase )
else:
A__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
A__ = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase=0 ):
A__ = floats_tensor((1, 3, 32, 32),rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
A__ = image.cpu().permute(0,2,3,1 )[0]
A__ = Image.fromarray(np.uinta(__lowerCamelCase ) ).convert('''RGB''' )
if str(__lowerCamelCase ).startswith('''mps''' ):
A__ = torch.manual_seed(__lowerCamelCase )
else:
A__ = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
A__ = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def UpperCamelCase ( self ):
if not hasattr(self.pipeline_class,'''_optional_components''' ):
return
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
A__ = self.get_dummy_inputs(__lowerCamelCase )
A__ = pipe(**__lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCamelCase )
A__ = self.pipeline_class.from_pretrained(__lowerCamelCase )
pipe_loaded.to(__lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCamelCase,__lowerCamelCase ) is None,f"`{optional_component}` did not stay set to None after loading.",)
A__ = self.get_dummy_inputs(__lowerCamelCase )
A__ = pipe_loaded(**__lowerCamelCase )[0]
A__ = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCamelCase,1E-4 )
def UpperCamelCase ( self ):
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = self.get_dummy_mask_inputs(__lowerCamelCase )
A__ = pipe.generate_mask(**__lowerCamelCase )
A__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape,(1, 16, 16) )
A__ = np.array([0] * 9 )
A__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase,1E-3 )
self.assertEqual(mask[0, -3, -4],0 )
def UpperCamelCase ( self ):
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
A__ = pipe.invert(**__lowerCamelCase ).images
A__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape,(2, 32, 32, 3) )
A__ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],)
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase,1E-3 )
def UpperCamelCase ( self ):
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCamelCase ( self ):
A__ = '''cpu'''
A__ = self.get_dummy_components()
A__ = {'''beta_start''': 0.00085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
A__ = DPMSolverMultistepScheduler(**__lowerCamelCase )
A__ = DPMSolverMultistepInverseScheduler(**__lowerCamelCase )
A__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
A__ = pipe.invert(**__lowerCamelCase ).images
A__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape,(2, 32, 32, 3) )
A__ = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799],)
A__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase,1E-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCamelCase ( cls ):
A__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
A__ = raw_image.convert('''RGB''' ).resize((768, 768) )
A__ = raw_image
def UpperCamelCase ( self ):
A__ = torch.manual_seed(0 )
A__ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''',safety_checker=__lowerCamelCase,torch_dtype=torch.floataa )
A__ = DDIMScheduler.from_config(pipe.scheduler.config )
A__ = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''a bowl of fruit'''
A__ = '''a bowl of pears'''
A__ = pipe.generate_mask(
image=self.raw_image,source_prompt=__lowerCamelCase,target_prompt=__lowerCamelCase,generator=__lowerCamelCase,)
A__ = pipe.invert(
prompt=__lowerCamelCase,image=self.raw_image,inpaint_strength=0.7,generator=__lowerCamelCase ).latents
A__ = pipe(
prompt=__lowerCamelCase,mask_image=__lowerCamelCase,image_latents=__lowerCamelCase,generator=__lowerCamelCase,negative_prompt=__lowerCamelCase,inpaint_strength=0.7,output_type='''numpy''',).images[0]
A__ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCamelCase ( self ):
A__ = torch.manual_seed(0 )
A__ = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''',safety_checker=__lowerCamelCase,torch_dtype=torch.floataa )
A__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A__ = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A__ = '''a bowl of fruit'''
A__ = '''a bowl of pears'''
A__ = pipe.generate_mask(
image=self.raw_image,source_prompt=__lowerCamelCase,target_prompt=__lowerCamelCase,generator=__lowerCamelCase,)
A__ = pipe.invert(
prompt=__lowerCamelCase,image=self.raw_image,inpaint_strength=0.7,generator=__lowerCamelCase,num_inference_steps=25,).latents
A__ = pipe(
prompt=__lowerCamelCase,mask_image=__lowerCamelCase,image_latents=__lowerCamelCase,generator=__lowerCamelCase,negative_prompt=__lowerCamelCase,inpaint_strength=0.7,num_inference_steps=25,output_type='''numpy''',).images[0]
A__ = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 193 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
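# Quick illustrative check of the helper above (added for clarity; not part of
# the original module): a 10-step cosine schedule stays within (0, max_beta].
if __name__ == "__main__":
    _betas = betas_for_alpha_bar(10)
    assert _betas.shape == (10,)
    assert 0.0 < float(_betas.min()) and float(_betas.max()) <= 0.999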
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.00085, beta_end: float = 0.012, beta_schedule: str = "linear", trained_betas=None, prediction_type: str = "epsilon", use_karras_sigmas: bool = False, clip_sample: bool = False, clip_sample_range: float = 1.0, timestep_spacing: str = "linspace", steps_offset: int = 0, ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='''cosine''')
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type='''exp''')
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample, timestep):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps, device=None, num_train_timesteps=None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
    @property
    def state_in_first_order(self):
        return self.dt is None
    def step(self, model_output, timestep, sample, return_dict=True):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(self, original_samples, noise, timesteps):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
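# A self-contained sketch of the Karras et al. (2022) rho-schedule implemented
# by `_convert_to_karras` above; the sigma range and step count are illustrative.
if __name__ == "__main__":
    _sigma_min, _sigma_max, _rho, _steps = 0.03, 14.6, 7.0, 10
    _ramp = np.linspace(0, 1, _steps)
    # Interpolate linearly in sigma**(1/rho) space, then raise back to the
    # rho-th power: steps cluster near sigma_min and spread out near sigma_max.
    _karras_sigmas = (_sigma_max ** (1 / _rho) + _ramp * (_sigma_min ** (1 / _rho) - _sigma_max ** (1 / _rho))) ** _rho
    print(_karras_sigmas)  # decreases monotonically from sigma_max to sigma_min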
| 193 | 1 |
def reverse_words(input_str: str) -> str:
    """
    Reverses the word order of the input sentence.

    >>> reverse_words("I love Python")
    'Python love I'
    """
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 360 |
'''simple docstring'''
from __future__ import annotations
class Node:
    """simple docstring"""

    def __init__(self, data=None):
        self.data = data
        self.next = None

    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(F'{temp.data}')
            temp = temp.next
        return "->".join(string_rep)


def make_linked_list(elements_list: list):
    """
    Creates a linked list from the elements of the given list and
    returns the head of the linked list.

    >>> make_linked_list([14, 52, 14, 12, 43])
    14->52->14->12->43
    """
    if not elements_list:
        raise Exception('The Elements List is empty')

    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head


def print_reverse(head_node: Node) -> None:
    """Prints the elements of the given linked list in reverse order."""
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)


def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print('Linked List:')
    print(linked_list)
    print('Elements in Reverse:')
    print_reverse(linked_list)
if __name__ == "__main__":
main()
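# A hedged iterative alternative to the recursive print_reverse above; it
# avoids Python's recursion limit on very long lists by using an explicit stack.
def print_reverse_iterative(head_node: Node) -> None:
    stack = []
    while head_node is not None:
        stack.append(head_node.data)
        head_node = head_node.next
    while stack:
        print(stack.pop())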
| 227 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( A__ , unittest.TestCase ):
'''simple docstring'''
_snake_case = LEDTokenizer
_snake_case = LEDTokenizerFast
_snake_case = True
    def setUp(self) -> None:
        super().setUp()
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""merges_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as fp:
            fp.write(json.dumps(vocab_tokens) + """\n""")
        with open(self.merges_file, """w""", encoding="""utf-8""") as fp:
            fp.write("""\n""".join(merges))
def A__ ( self , **snake_case_ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self , **snake_case_ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **snake_case_ )
def A__ ( self , snake_case_ ) -> str:
return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("""allenai/led-base-16384""")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""")
@require_torch
def A__ ( self ) -> Dict:
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
__lowerCAmelCase = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(snake_case_ , max_length=len(snake_case_ ) , padding=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__lowerCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case_ , snake_case_ )
@require_torch
def A__ ( self ) -> Optional[int]:
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , return_tensors="""pt""" )
self.assertIn("""input_ids""" , snake_case_ )
self.assertIn("""attention_mask""" , snake_case_ )
self.assertNotIn("""labels""" , snake_case_ )
self.assertNotIn("""decoder_attention_mask""" , snake_case_ )
@require_torch
def A__ ( self ) -> Tuple:
__lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(text_target=snake_case_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def A__ ( self ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=snake_case_ , truncation=snake_case_ , return_tensors="""pt""" )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 5_122) )
@require_torch
def A__ ( self ) -> Dict:
__lowerCAmelCase = ["""A long paragraph for summarization."""]
__lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(snake_case_ , return_tensors="""pt""" )
__lowerCAmelCase = tokenizer(text_target=snake_case_ , return_tensors="""pt""" )
__lowerCAmelCase = inputs["""input_ids"""]
__lowerCAmelCase = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def A__ ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = ["""Summary of the text.""", """Another summary."""]
__lowerCAmelCase = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ )
            __lowerCAmelCase = [[0] * len(x) for x in encoded_output["""input_ids"""]]
__lowerCAmelCase = tokenizer.pad(snake_case_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , snake_case_ )
def A__ ( self ) -> Optional[Any]:
pass
def A__ ( self ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
__lowerCAmelCase = """A, <mask> AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
__lowerCAmelCase = tokenizer_p.encode_plus(snake_case_ , add_special_tokens=snake_case_ , return_token_type_ids=snake_case_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
snake_case_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 301 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """
    Check whether a number is prime using 6k +/- 1 trial division.

    >>> is_prime(13)
    True
    >>> is_prime(15)
    False
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
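# Illustration of the 6k +/- 1 fact the trial division above relies on: every
# prime greater than 3 is congruent to 1 or 5 modulo 6. Guarded so it only
# runs when this file is executed directly.
if __name__ == "__main__":
    sample = [n for n in range(2, 50) if is_prime(n)]
    print(sample)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47]
    assert all(p in (2, 3) or p % 6 in (1, 5) for p in sample)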
| 301 | 1 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}
_SCREAMING_SNAKE_CASE = vae_state_dict["encoder.conv_in.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["encoder.conv_in.bias"]
_SCREAMING_SNAKE_CASE = vae_state_dict["encoder.conv_out.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["encoder.conv_out.bias"]
_SCREAMING_SNAKE_CASE = vae_state_dict["encoder.norm_out.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["encoder.norm_out.bias"]
_SCREAMING_SNAKE_CASE = vae_state_dict["decoder.conv_in.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["decoder.conv_in.bias"]
_SCREAMING_SNAKE_CASE = vae_state_dict["decoder.conv_out.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["decoder.conv_out.bias"]
_SCREAMING_SNAKE_CASE = vae_state_dict["decoder.norm_out.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["decoder.norm_out.bias"]
_SCREAMING_SNAKE_CASE = vae_state_dict["quant_conv.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["quant_conv.bias"]
_SCREAMING_SNAKE_CASE = vae_state_dict["post_quant_conv.weight"]
_SCREAMING_SNAKE_CASE = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
_SCREAMING_SNAKE_CASE = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
_SCREAMING_SNAKE_CASE = {
layer_id: [key for key in vae_state_dict if f"""down.{layer_id}""" in key] for layer_id in range(lowercase_ )
}
# Retrieves the keys for the decoder up blocks only
_SCREAMING_SNAKE_CASE = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
_SCREAMING_SNAKE_CASE = {
layer_id: [key for key in vae_state_dict if f"""up.{layer_id}""" in key] for layer_id in range(lowercase_ )
}
for i in range(lowercase_ ):
_SCREAMING_SNAKE_CASE = [key for key in down_blocks[i] if f"""down.{i}""" in key and f"""down.{i}.downsample""" not in key]
if f"""encoder.down.{i}.downsample.conv.weight""" in vae_state_dict:
_SCREAMING_SNAKE_CASE = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.weight""" )
_SCREAMING_SNAKE_CASE = vae_state_dict.pop(
f"""encoder.down.{i}.downsample.conv.bias""" )
_SCREAMING_SNAKE_CASE = renew_vae_resnet_paths(lowercase_ )
_SCREAMING_SNAKE_CASE = {"old": f"""down.{i}.block""", "new": f"""down_blocks.{i}.resnets"""}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_SCREAMING_SNAKE_CASE = [key for key in vae_state_dict if "encoder.mid.block" in key]
_SCREAMING_SNAKE_CASE = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_SCREAMING_SNAKE_CASE = [key for key in mid_resnets if f"""encoder.mid.block_{i}""" in key]
_SCREAMING_SNAKE_CASE = renew_vae_resnet_paths(lowercase_ )
_SCREAMING_SNAKE_CASE = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_SCREAMING_SNAKE_CASE = [key for key in vae_state_dict if "encoder.mid.attn" in key]
_SCREAMING_SNAKE_CASE = renew_vae_attention_paths(lowercase_ )
_SCREAMING_SNAKE_CASE = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
conv_attn_to_linear(lowercase_ )
for i in range(lowercase_ ):
_SCREAMING_SNAKE_CASE = num_up_blocks - 1 - i
_SCREAMING_SNAKE_CASE = [
key for key in up_blocks[block_id] if f"""up.{block_id}""" in key and f"""up.{block_id}.upsample""" not in key
]
if f"""decoder.up.{block_id}.upsample.conv.weight""" in vae_state_dict:
_SCREAMING_SNAKE_CASE = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.weight"""
]
_SCREAMING_SNAKE_CASE = vae_state_dict[
f"""decoder.up.{block_id}.upsample.conv.bias"""
]
_SCREAMING_SNAKE_CASE = renew_vae_resnet_paths(lowercase_ )
_SCREAMING_SNAKE_CASE = {"old": f"""up.{block_id}.block""", "new": f"""up_blocks.{i}.resnets"""}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_SCREAMING_SNAKE_CASE = [key for key in vae_state_dict if "decoder.mid.block" in key]
_SCREAMING_SNAKE_CASE = 2
for i in range(1 , num_mid_res_blocks + 1 ):
_SCREAMING_SNAKE_CASE = [key for key in mid_resnets if f"""decoder.mid.block_{i}""" in key]
_SCREAMING_SNAKE_CASE = renew_vae_resnet_paths(lowercase_ )
_SCREAMING_SNAKE_CASE = {"old": f"""mid.block_{i}""", "new": f"""mid_block.resnets.{i - 1}"""}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
_SCREAMING_SNAKE_CASE = [key for key in vae_state_dict if "decoder.mid.attn" in key]
_SCREAMING_SNAKE_CASE = renew_vae_attention_paths(lowercase_ )
_SCREAMING_SNAKE_CASE = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(lowercase_ , lowercase_ , lowercase_ , additional_replacements=[meta_path] , config=lowercase_ )
conv_attn_to_linear(lowercase_ )
return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 5_12
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
lowerCamelCase_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
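# Illustrative invocation of this conversion script (the filename and paths
# below are placeholders):
#
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.pt \
#       --dump_path /path/to/converted_vae
#
# The dump directory can afterwards be loaded with
# AutoencoderKL.from_pretrained("/path/to/converted_vae").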
| 357 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['LayoutLMv2FeatureExtractor']
lowerCamelCase_ = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
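# Sketch of what the _LazyModule indirection above buys (package path assumed):
# importing the package stays cheap, and the heavy submodules are only imported
# on first attribute access.
#
#   import transformers.models.layoutlmv2 as layoutlmv2  # fast, nothing heavy yet
#   config_cls = layoutlmv2.LayoutLMv2Config              # triggers the real import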
| 111 | 0 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
_lowercase : Union[str, Any] =logging.getLogger(__name__)
_lowercase : List[str] ="pytorch_model.bin"
@dataclasses.dataclass
class snake_case__ :
"""simple docstring"""
__lowerCAmelCase :str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
__lowerCAmelCase :Optional[str] = dataclasses.field(
default=__A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class snake_case__ :
"""simple docstring"""
__lowerCAmelCase :str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
__lowerCAmelCase :str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
__lowerCAmelCase :Optional[str] = dataclasses.field(
default=__A , metadata={"help": "A csv or a json file containing the validation data."} )
__lowerCAmelCase :Optional[str] = dataclasses.field(
default=__A , metadata={"help": "The name of the task to train on."} , )
__lowerCAmelCase :Optional[List[str]] = dataclasses.field(
default=__A , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class snake_case__ :
"""simple docstring"""
__lowerCAmelCase :str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
__lowerCAmelCase :Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
__lowerCAmelCase :Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
__lowerCAmelCase :Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__lowerCAmelCase :Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
__lowerCAmelCase :Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
__lowerCAmelCase :Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
__lowerCAmelCase :Optional[bool] = dataclasses.field(
default=__A , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
__lowerCAmelCase :Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
__lowerCAmelCase :Optional[int] = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
__lowerCAmelCase :Optional[int] = dataclasses.field(
default=__A , metadata={"help": "Random seed for initialization."} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
"""simple docstring"""
a__ : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1)
if args.do_filter_by_confidence:
        a__ : Dict = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
a__ : int = int(eval_result * len(_lowerCamelCase))
print(_lowerCamelCase)
a__ : str = dataset.sort("""probability""" , reverse=_lowerCamelCase)
a__ : str = dataset.select(range(_lowerCamelCase))
a__ : Optional[Any] = dataset.remove_columns(["""label""", """probability"""])
a__ : Any = dataset.rename_column("""prediction""" , """label""")
a__ : Any = dataset.map(lambda _lowercase: {"label": idalabel[example["label"]]})
a__ : List[Any] = dataset.shuffle(seed=args.seed)
a__ : List[str] = os.path.join(_lowerCamelCase , F'''train_pseudo.{args.data_file_extension}''')
if args.data_file_extension == "csv":
dataset.to_csv(_lowerCamelCase , index=_lowerCamelCase)
else:
dataset.to_json(_lowerCamelCase)
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
"""simple docstring"""
a__ : List[Any] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
a__ : Optional[int] = STModelArguments(model_name_or_path=_lowerCamelCase)
a__ : Dict = STDataArguments(train_file=_lowerCamelCase , infer_file=_lowerCamelCase)
a__ : List[str] = STTrainingArguments(output_dir=_lowerCamelCase)
a__ : Union[str, Any] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_lowerCamelCase).items():
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
for key, value in kwargs.items():
if hasattr(_lowerCamelCase , _lowerCamelCase):
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
# Sanity checks
a__ : Optional[int] = {}
a__ : List[Any] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
a__ : int = args.train_file
a__ : Tuple = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
a__ : List[Any] = args.eval_file
for key in data_files:
a__ : str = data_files[key].split(""".""")[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
a__ : int = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
logger.info("""Creating the initial data directory for self-training...""")
a__ : Optional[int] = F'''{args.output_dir}/self-train_iter-{{}}'''.format
a__ : str = data_dir_format(0)
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_lowerCamelCase)
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
accelerator.wait_for_everyone()
a__ : Dict = None
a__ : Union[str, Any] = None
a__ : Union[str, Any] = 0
a__ : Tuple = False
# Show the progress bar
a__ : Optional[int] = tqdm(range(args.max_selftrain_iterations) , disable=not accelerator.is_local_main_process)
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations)):
a__ : List[str] = data_dir_format(_lowerCamelCase)
assert os.path.exists(_lowerCamelCase)
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
a__ : Dict = os.path.join(_lowerCamelCase , """stage-1""")
a__ : List[str] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_lowerCamelCase , _lowerCamelCase):
arguments_dict.update({key: value})
a__ : Optional[Any] = os.path.join(_lowerCamelCase , """best-checkpoint""" , _lowerCamelCase)
if os.path.exists(_lowerCamelCase):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , _lowerCamelCase , _lowerCamelCase , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , _lowerCamelCase)
finetune(**_lowerCamelCase)
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCamelCase)
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , _lowerCamelCase)
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
a__ : Tuple = os.path.join(_lowerCamelCase , """best-checkpoint""")
a__ : str = os.path.join(_lowerCamelCase , """stage-2""")
# Update arguments_dict
a__ : int = model_path
a__ : List[str] = data_files["train"]
a__ : List[str] = current_output_dir
a__ : str = os.path.join(_lowerCamelCase , """best-checkpoint""" , _lowerCamelCase)
if os.path.exists(_lowerCamelCase):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , _lowerCamelCase , _lowerCamelCase , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , _lowerCamelCase)
finetune(**_lowerCamelCase)
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCamelCase)
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , _lowerCamelCase)
a__ : Any = iteration
a__ : List[str] = data_dir_format(iteration + 1)
a__ : Optional[Any] = AutoConfig.from_pretrained(os.path.join(_lowerCamelCase , """best-checkpoint"""))
a__ : Optional[Any] = config.idalabel
a__ : Optional[int] = os.path.join(_lowerCamelCase , """eval_results_best-checkpoint.json""")
a__ : Dict = os.path.join(_lowerCamelCase , """test_results_best-checkpoint.json""")
assert os.path.exists(_lowerCamelCase)
with open(_lowerCamelCase , """r""") as f:
a__ : Dict = float(json.load(_lowerCamelCase)[args.eval_metric])
a__ : Any = os.path.join(_lowerCamelCase , """infer_output_best-checkpoint.csv""")
assert os.path.exists(_lowerCamelCase)
# Loading the dataset from local csv or json files.
a__ : List[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]})["data"]
a__ : Tuple = load_dataset("""csv""" , data_files={"""data""": infer_output_file})["data"]
if accelerator.is_main_process:
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase)
shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F'''eval_results_iter-{iteration}.json'''))
if os.path.exists(_lowerCamelCase):
shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F'''test_results_iter-{iteration}.json'''))
create_pseudo_labeled_data(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
accelerator.wait_for_everyone()
a__ : Dict = os.path.join(_lowerCamelCase , F'''train_pseudo.{args.data_file_extension}''')
if args.evaluation_strategy != IntervalStrategy.NO.value:
a__ : Union[str, Any] = eval_result
if best_iteration is None:
a__ : Union[str, Any] = new_iteration
a__ : List[Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
a__ : Dict = new_iteration
a__ : Any = new_eval_result
a__ : str = 0
else:
if new_eval_result == best_eval_result:
a__ : str = new_iteration
a__ : List[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
a__ : Dict = True
progress_bar.update(1)
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , _lowerCamelCase)
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _lowerCamelCase)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCamelCase , F'''eval_results_iter-{iteration}.json''') , os.path.join(_lowerCamelCase , """eval_results_best-iteration.json""") , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1)
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _lowerCamelCase)
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCamelCase , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''') , os.path.join(_lowerCamelCase , """eval_results_best-iteration.json""") , )
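# A hedged sketch of driving the self-training loop above; the model name and
# file paths are placeholders, and extra keyword arguments are forwarded to the
# fine-tuning stages.
if __name__ == "__main__":
    selftrain(
        model_name_or_path="bert-base-uncased",
        train_file="train.csv",
        infer_file="unlabeled.csv",
        output_dir="self_train_output",
        eval_file="eval.csv",
        evaluation_strategy="epoch",
        max_selftrain_iterations=3,
    )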
| 170 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value
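# Usage illustration (the environment variable below is hypothetical):
# strtobool accepts "yes"/"no", "true"/"false", "1"/"0", and similar values.
#
#   RUN_MY_SUITE=yes python -m pytest tests/
#   _run_my_suite = parse_flag_from_env("RUN_MY_SUITE", default=False)  # -> 1 (truthy)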
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
UpperCamelCase = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
UpperCamelCase = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
UpperCamelCase = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def lowercase_ ( _lowerCamelCase : int):
try:
import faiss # noqa
except ImportError:
lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import regex # noqa
except ImportError:
lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
try:
import elasticsearch # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
try:
import sqlalchemy # noqa
except ImportError:
lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.TORCH_AVAILABLE:
lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not config.TF_AVAILABLE:
lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not config.JAX_AVAILABLE:
lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not config.PIL_AVAILABLE:
lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[Any]):
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("test requires tiktoken")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
def _require_spacy_model(_lowerCamelCase : Optional[int]):
try:
import spacy # noqa F401
spacy.load(_lowerCamelCase)
except ImportError:
return unittest.skip("test requires spacy")(_lowerCamelCase)
except OSError:
return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase)
else:
return test_case
return _require_spacy_model
def lowercase_ ( _lowerCamelCase : Dict):
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("test requires pyspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : List[str]):
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("test requires joblibspark")(_lowerCamelCase)
else:
return test_case
def lowercase_ ( _lowerCamelCase : Dict):
if not _run_slow_tests or _run_slow_tests == 0:
lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : int):
if not _run_local_tests or _run_local_tests == 0:
lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Optional[int]):
if not _run_packaged_tests or _run_packaged_tests == 0:
lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase)
return test_case
def lowercase_ ( _lowerCamelCase : Tuple):
if not _run_remote_tests or _run_remote_tests == 0:
lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase)
return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1E-16):
    online_request = requests.Session().request

    def timeout_request(method, url, *args, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, *args, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f'''OfflineMock[{url}]'''),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]):
return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist()
def lowercase_ ( _lowerCamelCase : str):
import decorator
from requests.exceptions import HTTPError
def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict):
try:
return func(*_lowerCamelCase , **_lowerCamelCase)
except HTTPError as err:
if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"):
pytest.xfail(str(_lowerCamelCase))
raise err
return decorator.decorator(_wrapper , _lowerCamelCase)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')

    return result
def pytest_xdist_worker_id() -> int:
    """Return the numeric id of the current pytest-xdist worker ("gw0" -> 0); 0 when xdist is not used."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(R"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port() -> int:
    """Derive a per-worker torch.distributed port so parallel test runs don't collide."""
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
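# Quick illustration of the xdist-aware port derivation above: worker "gw3"
# maps to offset 3, so that worker's distributed tests bind port 29503.
if __name__ == "__main__":
    os.environ["PYTEST_XDIST_WORKER"] = "gw3"
    assert pytest_xdist_worker_id() == 3
    assert get_torch_dist_unique_port() == 29503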
| 87 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ ={
'uw-madison/mra-base-512-4': 'https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json',
}
class MraConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'mra'

    def __init__(
        self,
        vocab_size=5_0_2_6_5, hidden_size=7_6_8, num_hidden_layers=1_2, num_attention_heads=1_2,
        intermediate_size=3_0_7_2, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1E-5,
        position_embedding_type='absolute', block_per_row=4, approx_mode='full',
        initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
| 351 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
UpperCamelCase__ =logging.getLogger(__name__)
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
def __init__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=None ) -> Optional[Any]:
super().__init__(
__lowerCamelCase , question_encoder_tokenizer=__lowerCamelCase , generator_tokenizer=__lowerCamelCase , index=__lowerCamelCase , init_retrieval=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : List[Any] = None
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Any:
logger.info("initializing retrieval" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("dist initialized" )
# needs to be set manually
_SCREAMING_SNAKE_CASE : List[str] = self._infer_socket_ifname()
# avoid clash with the NCCL port
_SCREAMING_SNAKE_CASE : List[Any] = str(distributed_port + 1 )
_SCREAMING_SNAKE_CASE : int = dist.new_group(ranks=__lowerCamelCase , backend="gloo" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("dist not initialized / main" )
self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0
    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : int = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
_SCREAMING_SNAKE_CASE : Any = next((addr for addr in addrs if addr.startswith("e" )) , __lowerCamelCase )
return ifname
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase ) -> Tuple[np.ndarray, List[dict]]:
# single GPU training
if not dist.is_initialized():
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._main_retrieve(__lowerCamelCase , __lowerCamelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCamelCase )
# distributed training
_SCREAMING_SNAKE_CASE : Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
_SCREAMING_SNAKE_CASE : Any = None
if self._is_main():
_SCREAMING_SNAKE_CASE : Optional[Any] = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(__lowerCamelCase )]
dist.gather(torch.tensor(__lowerCamelCase ) , dst=0 , gather_list=__lowerCamelCase , group=self.process_group )
# scatter logic
_SCREAMING_SNAKE_CASE : Optional[int] = question_hidden_states.shape[0]
_SCREAMING_SNAKE_CASE : Optional[Any] = []
_SCREAMING_SNAKE_CASE : Optional[int] = []
if self._is_main():
assert len(__lowerCamelCase ) == world_size
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._main_retrieve(torch.cat(__lowerCamelCase ).numpy() , __lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = torch.tensor(__lowerCamelCase ), torch.tensor(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = self._chunk_tensor(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self._scattered(__lowerCamelCase , [n_queries, n_docs] , target_type=torch.intaa )
_SCREAMING_SNAKE_CASE : Optional[Any] = self._scattered(__lowerCamelCase , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(__lowerCamelCase ) | 325 | 0 |
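The retriever above funnels every worker's query vectors to rank 0, runs the index lookup once, and scatters the per-worker results back over the gloo group. A minimal standalone sketch of that gather/scatter pattern (assuming an already-initialized process group; the doubling is a placeholder for the real index lookup, and `distributed_retrieve` is a hypothetical name):

import torch
import torch.distributed as dist

def distributed_retrieve(query: torch.Tensor, group) -> torch.Tensor:
    world_size = dist.get_world_size(group=group)
    is_main = dist.get_rank(group=group) == 0

    # 1) gather every worker's query tensor onto rank 0
    gather_list = [torch.empty_like(query) for _ in range(world_size)] if is_main else None
    dist.gather(query, gather_list=gather_list, dst=0, group=group)

    # 2) rank 0 runs the expensive lookup and chunks the result per worker
    scatter_list = None
    if is_main:
        results = torch.cat(gather_list) * 2.0  # placeholder for the real index lookup
        scatter_list = list(results.chunk(world_size))

    # 3) each worker receives its own chunk back
    out = torch.empty_like(query)
    dist.scatter(out, scatter_list=scatter_list, src=0, group=group)
    return out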
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
lowercase : Any = hex_num.strip()
if not hex_num:
raise ValueError("""No value was passed to the function""" )
lowercase : int = hex_num[0] == """-"""
if is_negative:
lowercase : List[Any] = hex_num[1:]
try:
lowercase : str = int(SCREAMING_SNAKE_CASE__ , 16 )
except ValueError:
raise ValueError("""Invalid value was passed to the function""" )
lowercase : Dict = """"""
while int_num > 0:
lowercase : str = str(int_num % 2 ) + bin_str
int_num >>= 1
return int(("""-""" + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20 |
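A quirk of the converter above is worth showing: it returns the binary digits packed into a plain decimal int (0xAC becomes the integer 10101100), and an input of "0" leaves bin_str empty, so the final int("") raises ValueError. Quick checks against the snippet's (obfuscated) name:

assert _snake_case("AC") == 10101100   # 0xAC == 172 == 0b10101100
assert _snake_case("-0x10") == -10000  # int("0x10", 16) accepts the 0x prefix
# _snake_case("0") raises ValueError: the while loop never runs for zero.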
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece.model''')
lowerCamelCase : Any = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
lowerCamelCase : Optional[int] = '''pt''' if is_torch_available() else '''tf'''
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase ( __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = CamembertTokenizer
_A : Union[str, Any] = CamembertTokenizerFast
_A : Union[str, Any] = True
_A : Tuple = True
def lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase : Union[str, Any] = CamembertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Dict = """<pad>"""
__lowercase : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a ) , __a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a ) , __a )
def lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>NOTUSED""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(__a ) , 1004 )
def lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Tuple = CamembertTokenizer(__a )
tokenizer.save_pretrained(self.tmpdirname )
__lowercase : Tuple = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
__lowercase : List[str] = """I was born in 92000, and this is falsé."""
__lowercase : Optional[Any] = tokenizer.encode(__a )
__lowercase : List[Any] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
__lowercase : Tuple = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Union[str, Any] = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
        # <unk> tokens are not the same for `rust` as for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
__lowercase : Dict = tokenizer.convert_ids_to_tokens(__a )
__lowercase : Any = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
def lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__lowercase : List[str] = self.get_tokenizer()
__lowercase : Any = self.get_rust_tokenizer()
__lowercase : Any = """I was born in 92000, and this is falsé."""
__lowercase : Tuple = tokenizer.tokenize(__a )
__lowercase : Optional[Any] = rust_tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
__lowercase : Dict = tokenizer.encode(__a , add_special_tokens=__a )
__lowercase : Dict = rust_tokenizer.encode(__a , add_special_tokens=__a )
self.assertListEqual(__a , __a )
__lowercase : Any = self.get_rust_tokenizer()
__lowercase : str = tokenizer.encode(__a )
__lowercase : Union[str, Any] = rust_tokenizer.encode(__a )
self.assertListEqual(__a , __a )
@slow
def lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase : str = {"""input_ids""": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
__lowercase : List[str] = [
"""Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
"""utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
"""À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
"""pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
"""telles que la traduction et la synthèse de texte.""",
]
self.tokenizer_integration_test_util(
expected_encoding=__a , model_name="""camembert-base""" , revision="""3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf""" , sequences=__a , ) | 233 | 0 |
def UpperCamelCase_ ( A__ : int = 60_08_51_47_51_43 ):
'''simple docstring'''
try:
lowerCAmelCase_ : List[str] = int(UpperCamelCase__ )
except (TypeError, ValueError):
raise TypeError("""Parameter n must be int or castable to int.""" )
if n <= 0:
raise ValueError("""Parameter n must be greater than or equal to one.""" )
lowerCAmelCase_ : Optional[Any] = 2
lowerCAmelCase_ : str = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowerCAmelCase_ : Union[str, Any] = i
while n % i == 0:
lowerCAmelCase_ : List[str] = n // i
i += 1
return int(UpperCamelCase__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 352 |
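The row above is a style-obfuscated Project Euler #3 solution; the inconsistent throwaway names hide the real variables. A de-obfuscated sketch of the same trial-division idea, with the intended reads restored:

def largest_prime_factor(n: int = 600851475143) -> int:
    n = int(n)
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i, ans = 2, 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:   # advance to the next divisor of n
            i += 1
        ans = i             # remember the most recent prime divisor
        while n % i == 0:   # strip that prime out completely
            n //= i
        i += 1
    return ans

assert largest_prime_factor(13195) == 29        # 13195 = 5 * 7 * 13 * 29
assert largest_prime_factor(600851475143) == 6857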
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : int = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = 'efficientformer'
def __init__( self : Any , lowerCamelCase : List[int] = [3, 2, 6, 4] , lowerCamelCase : List[int] = [48, 96, 2_24, 4_48] , lowerCamelCase : List[bool] = [True, True, True, True] , lowerCamelCase : int = 4_48 , lowerCamelCase : int = 32 , lowerCamelCase : int = 4 , lowerCamelCase : int = 7 , lowerCamelCase : int = 5 , lowerCamelCase : int = 8 , lowerCamelCase : int = 4 , lowerCamelCase : float = 0.0 , lowerCamelCase : int = 16 , lowerCamelCase : int = 3 , lowerCamelCase : int = 3 , lowerCamelCase : int = 3 , lowerCamelCase : int = 2 , lowerCamelCase : int = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : int = 1 , lowerCamelCase : bool = True , lowerCamelCase : bool = True , lowerCamelCase : float = 1E-5 , lowerCamelCase : str = "gelu" , lowerCamelCase : float = 0.02 , lowerCamelCase : float = 1E-12 , lowerCamelCase : int = 2_24 , lowerCamelCase : float = 1E-05 , **lowerCamelCase : int , ) -> None:
super().__init__(**lowerCamelCase )
lowerCAmelCase_ : List[str] = hidden_act
lowerCAmelCase_ : List[Any] = hidden_dropout_prob
lowerCAmelCase_ : Any = hidden_sizes
lowerCAmelCase_ : List[Any] = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : Tuple = initializer_range
lowerCAmelCase_ : Union[str, Any] = layer_norm_eps
lowerCAmelCase_ : int = patch_size
lowerCAmelCase_ : List[str] = num_channels
lowerCAmelCase_ : Dict = depths
lowerCAmelCase_ : int = mlp_expansion_ratio
lowerCAmelCase_ : Optional[Any] = downsamples
lowerCAmelCase_ : Union[str, Any] = dim
lowerCAmelCase_ : Union[str, Any] = key_dim
lowerCAmelCase_ : str = attention_ratio
lowerCAmelCase_ : Tuple = resolution
lowerCAmelCase_ : Optional[Any] = pool_size
lowerCAmelCase_ : str = downsample_patch_size
lowerCAmelCase_ : Dict = downsample_stride
lowerCAmelCase_ : str = downsample_pad
lowerCAmelCase_ : str = drop_path_rate
lowerCAmelCase_ : List[Any] = num_metaad_blocks
lowerCAmelCase_ : Tuple = distillation
lowerCAmelCase_ : Optional[Any] = use_layer_scale
lowerCAmelCase_ : Dict = layer_scale_init_value
lowerCAmelCase_ : Optional[Any] = image_size
lowerCAmelCase_ : Optional[Any] = batch_norm_eps
| 89 | 0 |
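A hedged usage sketch for the config above under its public transformers name, EfficientFormerConfig; the depths and hidden sizes mirror the snap-research/efficientformer-l1-300 checkpoint referenced in the archive map:

from transformers import EfficientFormerConfig

config = EfficientFormerConfig(depths=[3, 2, 6, 4], hidden_sizes=[48, 96, 224, 448])
print(config.model_type)  # "efficientformer"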
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
A__ : Any = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_ )
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Any , **A_ : int):
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
requires_backends(self , '''torch''')
if self.framework != "pt":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""")
self.check_model_type(lowerCamelCase_)
def UpperCAmelCase__ ( self : Dict , **A_ : Any):
lowerCAmelCase_ : Tuple = {}
lowerCAmelCase_ : int = {}
lowerCAmelCase_ : List[str] = {}
# preprocess args
if "points_per_batch" in kwargs:
lowerCAmelCase_ : List[Any] = kwargs['''points_per_batch''']
if "points_per_crop" in kwargs:
lowerCAmelCase_ : Optional[Any] = kwargs['''points_per_crop''']
if "crops_n_layers" in kwargs:
lowerCAmelCase_ : Union[str, Any] = kwargs['''crops_n_layers''']
if "crop_overlap_ratio" in kwargs:
lowerCAmelCase_ : Union[str, Any] = kwargs['''crop_overlap_ratio''']
if "crop_n_points_downscale_factor" in kwargs:
lowerCAmelCase_ : List[Any] = kwargs['''crop_n_points_downscale_factor''']
# postprocess args
if "pred_iou_thresh" in kwargs:
lowerCAmelCase_ : Any = kwargs['''pred_iou_thresh''']
if "stability_score_offset" in kwargs:
lowerCAmelCase_ : Optional[Any] = kwargs['''stability_score_offset''']
if "mask_threshold" in kwargs:
lowerCAmelCase_ : int = kwargs['''mask_threshold''']
if "stability_score_thresh" in kwargs:
lowerCAmelCase_ : str = kwargs['''stability_score_thresh''']
if "crops_nms_thresh" in kwargs:
lowerCAmelCase_ : List[str] = kwargs['''crops_nms_thresh''']
if "output_rle_mask" in kwargs:
lowerCAmelCase_ : List[Any] = kwargs['''output_rle_mask''']
if "output_bboxes_mask" in kwargs:
lowerCAmelCase_ : List[str] = kwargs['''output_bboxes_mask''']
return preprocess_kwargs, forward_params, postprocess_kwargs
def __call__( self : str , A_ : Tuple , *A_ : Optional[int] , A_ : Union[str, Any]=None , A_ : str=None , **A_ : Dict):
return super().__call__(lowerCamelCase_ , *lowerCamelCase_ , num_workers=lowerCamelCase_ , batch_size=lowerCamelCase_ , **lowerCamelCase_)
def UpperCAmelCase__ ( self : str , A_ : str , A_ : int=6_4 , A_ : Any = 0 , A_ : Optional[Any] = 5_1_2 / 1_5_0_0 , A_ : List[str] = 3_2 , A_ : Tuple = 1 , ):
lowerCAmelCase_ : List[Any] = load_image(lowerCamelCase_)
lowerCAmelCase_ : List[Any] = self.image_processor.size['''longest_edge''']
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = self.image_processor.generate_crop_boxes(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
lowerCAmelCase_ : Dict = self.image_processor(images=lowerCamelCase_ , return_tensors='''pt''')
with self.device_placement():
if self.framework == "pt":
lowerCAmelCase_ : List[str] = self.get_inference_context()
with inference_context():
lowerCAmelCase_ : Any = self._ensure_tensor_on_device(lowerCamelCase_ , device=self.device)
lowerCAmelCase_ : List[Any] = self.model.get_image_embeddings(model_inputs.pop('''pixel_values'''))
lowerCAmelCase_ : List[Any] = image_embeddings
lowerCAmelCase_ : Any = grid_points.shape[1]
lowerCAmelCase_ : int = points_per_batch if points_per_batch is not None else n_points
if points_per_batch <= 0:
raise ValueError(
                '''Cannot have points_per_batch<=0. Must be >=1 to return batched outputs. '''
'''To return all points at once, set points_per_batch to None''')
for i in range(0 , lowerCamelCase_ , lowerCamelCase_):
lowerCAmelCase_ : List[str] = grid_points[:, i : i + points_per_batch, :, :]
lowerCAmelCase_ : str = input_labels[:, i : i + points_per_batch]
lowerCAmelCase_ : List[Any] = i == n_points - points_per_batch
yield {
"input_points": batched_points,
"input_labels": labels,
"input_boxes": crop_boxes,
"is_last": is_last,
**model_inputs,
}
def UpperCAmelCase__ ( self : Tuple , A_ : Optional[int] , A_ : Optional[Any]=0.88 , A_ : Optional[int]=0.95 , A_ : Tuple=0 , A_ : Optional[Any]=1 , ):
lowerCAmelCase_ : str = model_inputs.pop('''input_boxes''')
lowerCAmelCase_ : Dict = model_inputs.pop('''is_last''')
lowerCAmelCase_ : Optional[Any] = model_inputs.pop('''original_sizes''').tolist()
lowerCAmelCase_ : str = model_inputs.pop('''reshaped_input_sizes''').tolist()
lowerCAmelCase_ : List[str] = self.model(**lowerCamelCase_)
# post processing happens here in order to avoid CPU GPU copies of ALL the masks
lowerCAmelCase_ : Optional[Any] = model_outputs['''pred_masks''']
lowerCAmelCase_ : Tuple = self.image_processor.post_process_masks(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , binarize=lowerCamelCase_)
lowerCAmelCase_ : List[str] = model_outputs['''iou_scores''']
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : List[str] = self.image_processor.filter_masks(
masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
return {
"masks": masks,
"is_last": is_last,
"boxes": boxes,
"iou_scores": iou_scores,
}
def UpperCAmelCase__ ( self : Optional[int] , A_ : Optional[Any] , A_ : Any=False , A_ : Dict=False , A_ : Tuple=0.7 , ):
lowerCAmelCase_ : Optional[int] = []
lowerCAmelCase_ : List[str] = []
lowerCAmelCase_ : Optional[int] = []
for model_output in model_outputs:
all_scores.append(model_output.pop('''iou_scores'''))
all_masks.extend(model_output.pop('''masks'''))
all_boxes.append(model_output.pop('''boxes'''))
lowerCAmelCase_ : Any = torch.cat(lowerCamelCase_)
lowerCAmelCase_ : Optional[int] = torch.cat(lowerCamelCase_)
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Any = self.image_processor.post_process_for_mask_generation(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
lowerCAmelCase_ : Optional[int] = defaultdict(lowerCamelCase_)
for output in model_outputs:
for k, v in output.items():
extra[k].append(lowerCamelCase_)
lowerCAmelCase_ : Optional[int] = {}
if output_rle_mask:
lowerCAmelCase_ : List[Any] = rle_mask
if output_bboxes_mask:
lowerCAmelCase_ : List[str] = bounding_boxes
return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
| 103 |
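The chunk pipeline above is the SAM automatic mask generator. A usage sketch, assuming the public facebook/sam-vit-base checkpoint (any SAM checkpoint should behave the same); the keyword names follow the parameter-sanitizing method in the snippet:

from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=64,           # prompt points sent through the model per forward pass
    pred_iou_thresh=0.88,          # post-processing thresholds, per the snippet's defaults
    stability_score_thresh=0.95,
)
print(len(outputs["masks"]), outputs["scores"].shape)  # masks plus their IoU scores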
import cmath
import math
def a( A : float , A : float , A : float , A : float ) -> complex:
"""simple docstring"""
a = math.radians(A )
a = math.radians(A )
# Convert voltage and current to rectangular form
a = cmath.rect(A , A )
a = cmath.rect(A , A )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
| 227 | 0 |
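A worked check of the phasor arithmetic above (the obfuscated `a` is presumably `apparent_power`): 100 V at 0° driving 5 A at 0° gives a purely real 500 VA, and shifting the current by 90° makes it purely reactive.

assert a(100, 5, 0, 0) == (500 + 0j)
print(a(100, 5, 0, 90))  # ~500j, up to floating-point error in cmath.rect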
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 367 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def lowercase ( _SCREAMING_SNAKE_CASE : np.ndarray ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = np.shape(_SCREAMING_SNAKE_CASE )
if rows != columns:
_UpperCAmelCase = (
'''\'table\' has to be of square shaped array but got a '''
f'{rows}x{columns} array:\n{table}'
)
raise ValueError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.zeros((rows, columns) )
_UpperCAmelCase = np.zeros((rows, columns) )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
if upper[j][j] == 0:
raise ArithmeticError('''No LU decomposition exists''' )
_UpperCAmelCase = (table[i][j] - total) / upper[j][j]
_UpperCAmelCase = 1
for j in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = sum(lower[i][k] * upper[k][j] for k in range(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase = table[i][j] - total
return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 | 0 |
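The routine above is a Doolittle LU factorization, though the obfuscated range arguments hide the intended loop bounds (range(columns), range(i), range(j)). What it computes for a 3x3 input, verified by hand: lower carries a unit diagonal and lower @ upper reproduces the table.

import numpy as np

table = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
lower = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [2.5, 8.0, 1.0]])
upper = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [0.0, 0.0, -17.5]])
assert np.allclose(lower @ upper, table)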
'''simple docstring'''
import numpy as np
import qiskit
def UpperCamelCase__ ( lowerCAmelCase = 8 , lowerCAmelCase = None ):
"""simple docstring"""
_lowerCAmelCase = np.random.default_rng(seed=lowerCAmelCase )
# Roughly 25% of the qubits will contribute to the key.
# So we take more than we need.
_lowerCAmelCase = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase = rng.integers(2 , size=lowerCAmelCase )
# The set of states Alice will prepare.
_lowerCAmelCase = rng.integers(2 , size=lowerCAmelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase = rng.integers(2 , size=lowerCAmelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase = qiskit.QuantumCircuit(lowerCAmelCase , name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(lowerCAmelCase ):
if alice_state[index] == 1:
bbaa_circ.x(lowerCAmelCase )
if alice_basis[index] == 1:
bbaa_circ.h(lowerCAmelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(lowerCAmelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(lowerCAmelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase = qiskit.execute(lowerCAmelCase , lowerCAmelCase , shots=1 , seed_simulator=lowerCAmelCase )
# Returns the result of measurement.
_lowerCAmelCase = job.result().get_counts(lowerCAmelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_lowerCAmelCase = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase = gen_key[:key_len] if len(lowerCAmelCase ) >= key_len else gen_key.ljust(lowerCAmelCase , """0""" )
return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 70 |
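The step that actually produces a shared key above is basis sifting: a measurement outcome is kept only where Bob's basis matched Alice's. A qiskit-free sketch of that sifting, seeded like the snippet (with matching bases and no eavesdropper, Bob's result equals Alice's prepared bit):

import numpy as np

rng = np.random.default_rng(seed=0)
num_qubits = 6 * 8                              # oversample, as the snippet does
alice_basis = rng.integers(2, size=num_qubits)
alice_state = rng.integers(2, size=num_qubits)
bob_basis = rng.integers(2, size=num_qubits)

sifted = alice_state[alice_basis == bob_basis]  # keep matching-basis rounds only
key = "".join(map(str, sifted[:8])).ljust(8, "0")
print(key)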
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
__UpperCAmelCase : Any = "src/diffusers"
# Matches is_xxx_available()
__UpperCAmelCase : List[str] = re.compile(R"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
__UpperCAmelCase : Dict = re.compile(R"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
__UpperCAmelCase : int = "\n{0} = None\n"
__UpperCAmelCase : List[str] = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n"
__UpperCAmelCase : Tuple = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
def A__ ( SCREAMING_SNAKE_CASE__) -> Any:
__snake_case: int = _re_backend.findall(SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) == 0:
return None
return "_and_".join(SCREAMING_SNAKE_CASE__)
def A__ ( ) -> Optional[int]:
with open(os.path.join(SCREAMING_SNAKE_CASE__ , """__init__.py""") , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
__snake_case: Optional[Any] = f.readlines()
# Get to the point we do the actual imports for type checking
__snake_case: Tuple = 0
__snake_case: Any = {}
# Go through the end of the file
while line_index < len(SCREAMING_SNAKE_CASE__):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__snake_case: List[Any] = find_backend(lines[line_index])
if backend is not None:
while not lines[line_index].startswith("""else:"""):
line_index += 1
line_index += 1
__snake_case: Any = []
# Until we unindent, add backend objects to the list
while line_index < len(SCREAMING_SNAKE_CASE__) and len(lines[line_index]) > 1:
__snake_case: List[Any] = lines[line_index]
__snake_case: str = _re_single_line_import.search(SCREAMING_SNAKE_CASE__)
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(""", """))
elif line.startswith(""" """ * 8):
objects.append(line[8:-2])
line_index += 1
if len(SCREAMING_SNAKE_CASE__) > 0:
__snake_case: Optional[Any] = objects
else:
line_index += 1
return backend_specific_objects
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
if name.isupper():
return DUMMY_CONSTANT.format(SCREAMING_SNAKE_CASE__)
elif name.islower():
return DUMMY_FUNCTION.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
else:
return DUMMY_CLASS.format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__=None) -> Optional[int]:
if backend_specific_objects is None:
__snake_case: Union[str, Any] = read_init()
# For special correspondence backend to module name as used in the function requires_modulename
__snake_case: Union[str, Any] = {}
for backend, objects in backend_specific_objects.items():
__snake_case: List[Any] = """[""" + """, """.join(F'''"{b}"''' for b in backend.split("""_and_""")) + """]"""
__snake_case: Dict = """# This file is autogenerated by the command `make fix-copies`, do not edit.\n"""
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) for o in objects])
__snake_case: List[str] = dummy_file
return dummy_files
def A__ ( SCREAMING_SNAKE_CASE__=False) -> int:
__snake_case: List[str] = create_dummy_files()
# For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
__snake_case: Dict = {"""torch""": """pt"""}
# Locate actual dummy modules and read their content.
__snake_case: Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , """utils""")
__snake_case: List[Any] = {
backend: os.path.join(SCREAMING_SNAKE_CASE__ , F'''dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py''')
for backend in dummy_files.keys()
}
__snake_case: int = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(SCREAMING_SNAKE_CASE__):
with open(SCREAMING_SNAKE_CASE__ , """r""" , encoding="""utf-8""" , newline="""\n""") as f:
__snake_case: Any = f.read()
else:
__snake_case: Tuple = """"""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F'''Updating diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py as the main '''
"""__init__ has new objects.""")
with open(dummy_file_paths[backend] , """w""" , encoding="""utf-8""" , newline="""\n""") as f:
f.write(dummy_files[backend])
else:
raise ValueError(
"""The main __init__ has objects that are not present in """
F'''diffusers.utils.dummy_{short_names.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)}_objects.py. Run `make fix-copies` '''
"""to fix this.""")
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : List[Any] = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 111 | 0 |
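For reference, this is what the DUMMY_CLASS template above expands to for one torch-only object; the class name UNet2DModel is illustrative, and the backend list is built by the '"[" + ", ".join(...) + "]"' construction:

class UNet2DModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])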
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase : List[str] = prime_factors(_UpperCamelCase )
if is_square_free(_UpperCamelCase ):
return -1 if len(_UpperCamelCase ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 358 |
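The helper above is the Möbius function, although the parameter was renamed inconsistently (read _UpperCamelCase as the input n). A minimal self-contained reference with spot checks of the intended behavior:

def mobius(n: int) -> int:
    factors, d = [], 2
    while d * d <= n:                       # trial-divide to collect prime factors
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    if len(set(factors)) != len(factors):   # a squared prime factor gives 0
        return 0
    return -1 if len(factors) % 2 else 1    # sign from the parity of the factor count

assert mobius(24) == 0    # 24 = 2^3 * 3 is not square-free
assert mobius(10) == 1    # two prime factors
assert mobius(30) == -1   # three prime factors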
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=8 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=16 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=36 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : int = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : Union[str, Any] = seq_length
__UpperCAmelCase : int = is_training
__UpperCAmelCase : Union[str, Any] = use_input_mask
__UpperCAmelCase : List[str] = use_token_type_ids
__UpperCAmelCase : List[str] = use_labels
__UpperCAmelCase : Optional[Any] = vocab_size
__UpperCAmelCase : Tuple = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : Optional[int] = num_attention_heads
__UpperCAmelCase : str = intermediate_size
__UpperCAmelCase : List[Any] = hidden_act
__UpperCAmelCase : Optional[Any] = hidden_dropout_prob
__UpperCAmelCase : List[Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[Any] = max_position_embeddings
__UpperCAmelCase : List[Any] = type_vocab_size
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : Optional[Any] = num_labels
__UpperCAmelCase : Optional[Any] = num_choices
__UpperCAmelCase : int = scope
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[Any] = None
if self.use_input_mask:
__UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Any = None
if self.use_token_type_ids:
__UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ) -> List[str]:
'''simple docstring'''
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.get_config()
__UpperCAmelCase : List[Any] = 300
return config
def __A ( self ) -> Dict:
'''simple docstring'''
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Any = self.prepare_config_and_inputs()
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = MraModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[str] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__UpperCAmelCase : Any = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
__UpperCAmelCase : List[str] = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) -> str:
'''simple docstring'''
__UpperCAmelCase : List[str] = True
__UpperCAmelCase : List[Any] = MraModel(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , encoder_attention_mask=__UpperCAmelCase , )
__UpperCAmelCase : Dict = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , )
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : Any = MraForMaskedLM(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Optional[int] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
__UpperCAmelCase : str = MraForQuestionAnswering(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : int = MraForSequenceClassification(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : str = MraForTokenClassification(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.num_choices
__UpperCAmelCase : int = MraForMultipleChoice(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__UpperCAmelCase : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : List[Any] = config_and_inputs
__UpperCAmelCase : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Any = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : int = False
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : Dict = ()
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = MraModelTester(self )
__UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 )
def __A ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def __A ( self ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> int:
'''simple docstring'''
__UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : List[Any] = type
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __A ( self ) -> str:
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCAmelCase )
def __A ( self ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCAmelCase )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCAmelCase )
def __A ( self ) -> Any:
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCAmelCase )
@slow
def __A ( self ) -> Any:
'''simple docstring'''
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Tuple = MraModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@unittest.skip(reason="""MRA does not output attentions""" )
def __A ( self ) -> List[Any]:
'''simple docstring'''
return
@require_torch
class _A ( unittest.TestCase ):
@slow
def __A ( self ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase : Tuple = MraModel.from_pretrained("""uw-madison/mra-base-512-4""" )
__UpperCAmelCase : str = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(__UpperCAmelCase )[0]
__UpperCAmelCase : Optional[Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
__UpperCAmelCase : int = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-512-4""" )
__UpperCAmelCase : Union[str, Any] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
__UpperCAmelCase : int = model(__UpperCAmelCase )[0]
__UpperCAmelCase : Union[str, Any] = 50_265
__UpperCAmelCase : Union[str, Any] = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__UpperCAmelCase : int = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MraForMaskedLM.from_pretrained("""uw-madison/mra-base-4096-8-d3""" )
__UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 )
with torch.no_grad():
__UpperCAmelCase : Any = model(__UpperCAmelCase )[0]
__UpperCAmelCase : Dict = 50_265
__UpperCAmelCase : Optional[int] = torch.Size((1, 4_096, vocab_size) )
self.assertEqual(output.shape , __UpperCAmelCase )
__UpperCAmelCase : str = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 ) )
| 16 | 0 |
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class __UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , _A : int = 16 , _A : int = 88 , _A : Optional[int] = None , _A : int = 1 , _A : float = 0.0 , _A : int = 32 , _A : Optional[int] = None , _A : bool = False , _A : Optional[int] = None , _A : Optional[int] = None , _A : str = "geglu" , _A : Optional[int] = None , ):
"""simple docstring"""
super().__init__()
__SCREAMING_SNAKE_CASE : List[str] = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=_A , attention_head_dim=_A , in_channels=_A , num_layers=_A , dropout=_A , norm_num_groups=_A , cross_attention_dim=_A , attention_bias=_A , sample_size=_A , num_vector_embeds=_A , activation_fn=_A , num_embeds_ada_norm=_A , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__SCREAMING_SNAKE_CASE : List[Any] = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__SCREAMING_SNAKE_CASE : Tuple = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__SCREAMING_SNAKE_CASE : Tuple = [1, 0]
def UpperCAmelCase__ ( self : Optional[Any] , _A : int , _A : int , _A : Any=None , _A : Optional[int]=None , _A : str=None , _A : bool = True , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = hidden_states
__SCREAMING_SNAKE_CASE : Tuple = []
__SCREAMING_SNAKE_CASE : List[str] = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__SCREAMING_SNAKE_CASE : int = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__SCREAMING_SNAKE_CASE : Tuple = self.transformer_index_for_condition[i]
__SCREAMING_SNAKE_CASE : Dict = self.transformers[transformer_index](
_A , encoder_hidden_states=_A , timestep=_A , cross_attention_kwargs=_A , return_dict=_A , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__SCREAMING_SNAKE_CASE : str = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__SCREAMING_SNAKE_CASE : Union[str, Any] = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=_A )
| 303 |
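The forward pass above splits the conditioning tokens by condition_lengths (77 text tokens, 257 image tokens), routes each slice through one transformer, and blends the two residuals before re-adding the input. The blending rule in isolation, with illustrative shapes:

import torch

hidden = torch.randn(2, 4, 8)           # stand-in for the input hidden states
residual_text = torch.randn(2, 4, 8)    # encoded_state - input for condition 0
residual_image = torch.randn(2, 4, 8)   # encoded_state - input for condition 1
mix_ratio = 0.5

output = residual_text * mix_ratio + residual_image * (1 - mix_ratio) + hidden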
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowercase_ = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
lowercase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = WATERMARK_BITS
__SCREAMING_SNAKE_CASE : Optional[int] = WatermarkEncoder()
self.encoder.set_watermark('''bits''' , self.watermark )
def UpperCAmelCase__ ( self : List[Any] , _A : torch.FloatTensor ):
"""simple docstring"""
if images.shape[-1] < 256:
return images
__SCREAMING_SNAKE_CASE : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__SCREAMING_SNAKE_CASE : Dict = [self.encoder.encode(_A , '''dwtDct''' ) for image in images]
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(np.array(_A ) ).permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
| 303 | 1 |
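How the 48-bit constant above becomes the bit list: bin() renders the integer as a "0b"-prefixed string, and slicing off the prefix leaves one character per bit (the leading bit is 1, so no padding is lost).

message = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
bits = [int(bit) for bit in bin(message)[2:]]
assert len(bits) == 48 and bits[:4] == [1, 0, 1, 1]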
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class lowerCamelCase ( _a ):
UpperCAmelCase__ : Union[str, Any] = """levit"""
def __init__(self : Any , _A : Optional[int]=2_2_4 , _A : Union[str, Any]=3 , _A : Optional[Any]=3 , _A : str=2 , _A : str=1 , _A : Tuple=1_6 , _A : Dict=[1_2_8, 2_5_6, 3_8_4] , _A : List[Any]=[4, 8, 1_2] , _A : List[Any]=[4, 4, 4] , _A : List[str]=[1_6, 1_6, 1_6] , _A : Union[str, Any]=0 , _A : str=[2, 2, 2] , _A : List[Any]=[2, 2, 2] , _A : Union[str, Any]=0.02 , **_A : str , ) -> Optional[Any]:
super().__init__(**_A )
snake_case = image_size
snake_case = num_channels
snake_case = kernel_size
snake_case = stride
snake_case = padding
snake_case = hidden_sizes
snake_case = num_attention_heads
snake_case = depths
snake_case = key_dim
snake_case = drop_path_rate
snake_case = patch_size
snake_case = attention_ratio
snake_case = mlp_ratio
snake_case = initializer_range
snake_case = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class lowerCamelCase ( _a ):
UpperCAmelCase__ : Any = version.parse("1.11" )
@property
def UpperCAmelCase(self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCAmelCase(self : List[str] ) -> float:
return 1E-4
| 368 |
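A hedged usage sketch for the ONNX export config above under its public transformers names (LevitConfig / LevitOnnxConfig):

from transformers import LevitConfig
from transformers.models.levit.configuration_levit import LevitOnnxConfig

config = LevitConfig()
onnx_config = LevitOnnxConfig(config)
print(list(onnx_config.inputs.keys()))  # ['pixel_values']
print(onnx_config.atol_for_validation)  # 1e-4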
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 137 | 0 |