code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'''configuration_speech_encoder_decoder''': ['''SpeechEncoderDecoderConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_speech_encoder_decoder'''] = ['''SpeechEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_speech_encoder_decoder'''] = ['''FlaxSpeechEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
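# Note: minimal sketch of the lazy-import idea used above, for illustration only —
# the real implementation lives in transformers.utils._LazyModule and is more involved:
#
#     def __getattr__(name):  # hypothetical module-level hook
#         for submodule, exported in _import_structure.items():
#             if name in exported:
#                 import importlib
#                 return getattr(importlib.import_module("." + submodule, __name__), name)
#         raise AttributeError(name)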
| 94 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    """simple docstring"""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
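# Illustrative checks (assuming the TheAlgorithms-style `prime_factors` helper
# returns the factorization with multiplicity, e.g. prime_factors(12) == [2, 2, 3]):
#   liouville_lambda(4) == 1    # two prime factors (2 * 2) -> even -> +1
#   liouville_lambda(12) == -1  # three prime factors (2 * 2 * 3) -> odd -> -1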
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94 | 1 |
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = 'MCTCTFeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        """simple docstring"""
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if "raw_speech" in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        """simple docstring"""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def decode(self, *args, **kwargs):
        """simple docstring"""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """simple docstring"""
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.')
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
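# Illustrative usage (the checkpoint name is an example, not guaranteed to exist;
# `waveform` is assumed to be a 1-D float array of raw audio):
#
#     processor = MCTCTProcessor.from_pretrained('speechbrain/m-ctc-t-large')
#     inputs = processor(audio=waveform, sampling_rate=16000, return_tensors='pt')
#     labels = processor(text='a transcript').input_ids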
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convbert"] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convbert"] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 122 | 0 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 19 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    '''simple docstring'''

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The model checkpoint for weights initialization. Leave None if you want to train a model from'
                ' scratch.'
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'},
    )


@dataclass
class DataTrainingArguments:
    '''simple docstring'''

    train_data_file: Optional[str] = field(
        default=None, metadata={'help': 'The input training data file (a text file).'}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            'help': (
                'The input training data files (multiple files in glob format). '
                'Very often splitting large files to smaller files can prevent tokenizer going out of memory'
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input train ref data file for whole word mask in Chinese.'},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input eval ref data file for whole word mask in Chinese.'},
    )
    line_by_line: bool = field(
        default=False,
        metadata={'help': 'Whether distinct lines of text in the dataset are to be handled as distinct sequences.'},
    )
    mlm: bool = field(
        default=False, metadata={'help': 'Train with masked-language modeling loss instead of language modeling.'}
    )
    whole_word_mask: bool = field(default=False, metadata={'help': 'Whether ot not to use whole word mask.'})
    mlm_probability: float = field(
        default=0.15, metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            'help': (
                'Ratio of length of a span of masked tokens to surrounding context length for permutation language'
                ' modeling.'
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={'help': 'Maximum length of a span of masked tokens for permutation language modeling.'}
    )
    block_size: int = field(
        default=-1,
        metadata={
            'help': (
                'Optional input sequence length after tokenization.'
                'The training dataset will be truncated in block of this size for training.'
                'Default to the model max input length for single sentence inputs (take into account special tokens).'
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    """simple docstring"""
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' )
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , ref_path=ref_path , )
            return LineByLineTextDataset(tokenizer=tokenizer , file_path=file_path , block_size=args.block_size )
        else:
            return TextDataset(
                tokenizer=tokenizer , file_path=file_path , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=cache_dir , )

    if evaluate:
        return _dataset(args.eval_data_file , args.eval_ref_file )
    elif args.train_data_files:
        return ConcatDataset([_dataset(f ) for f in glob(args.train_data_files )] )
    else:
        return _dataset(args.train_data_file , args.train_ref_file )
def main():
    """simple docstring"""
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
'''Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '''
'''or remove the --do_eval argument.''' )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info('''Training/evaluation parameters %s''' , training_args )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
logger.warning('''You are instantiating a new config instance from scratch.''' )
if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'''
''' script, save it,and load it from here, using --tokenizer_name''' )
if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    else:
        logger.info('''Training new model from scratch''' )
        model = AutoModelWithLMHead.from_config(config )
    model.resize_token_embeddings(len(tokenizer ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
'''BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'''
'''--mlm flag (masked language modeling).''' )
    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size , tokenizer.max_len )
# Get datasets
    train_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , cache_dir=model_args.cache_dir ) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args , tokenizer=tokenizer , evaluate=True , cache_dir=model_args.cache_dir )
        if training_args.do_eval
        else None
    )
if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer , mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , data_collator=data_collator , train_dataset=train_dataset , eval_dataset=eval_dataset , prediction_loss_only=True , )
# Training
if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
            else None
        )
        trainer.train(model_path=model_path )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        eval_output = trainer.evaluate()
        perplexity = math.exp(eval_output['''eval_loss'''] )
        result = {'''perplexity''': perplexity}
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' )
        if trainer.is_world_master():
            with open(output_eval_file , '''w''' ) as writer:
                logger.info('''***** Eval results *****''' )
                for key in sorted(result.keys() ):
                    logger.info(''' %s = %s''' , key , str(result[key] ) )
                    writer.write('''%s = %s\n''' % (key, str(result[key] )) )
        results.update(result )
    return results
def _mp_fn(index):
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
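# Example invocation (illustrative; the flag names come from the dataclasses above,
# and the script filename is assumed):
#
#     python run_language_modeling.py --model_name_or_path gpt2 \
#         --train_data_file train.txt --do_train --output_dir ./output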
if __name__ == "__main__":
main()
| 102 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 350 |
"""simple docstring"""
__A : Dict = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__A : List[Any] = [{'type': 'code', 'content': INSTALL_CONTENT}]
__A : List[Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 57 | 0 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)
model_dict = {'''facebook/bart-base''': BartForConditionalGeneration}
tokenizer_dict = {'''facebook/bart-base''': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' )
    parser.add_argument(
        '''--validation_file''' , type=str , default=None , help='''A csv or a json file containing the validation data.''' )
    parser.add_argument(
        '''--max_length''' , type=int , default=5 , help='''The maximum total input sequence length after tokenization.''' , )
    parser.add_argument(
        '''--num_beams''' , type=int , default=None , help=(
            '''Number of beams to use for evaluation. This argument will be '''
            '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.'''
        ) , )
    parser.add_argument(
        '''--model_name_or_path''' , type=str , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=True , )
    parser.add_argument(
        '''--config_name''' , type=str , default=None , help='''Pretrained config name or path if not the same as model_name''' , )
    parser.add_argument(
        '''--device''' , type=str , default='''cpu''' , help='''Device where the model will be run''' , )
    parser.add_argument('''--output_file_path''' , type=str , default=None , help='''Where to store the final ONNX file.''' )
    args = parser.parse_args()
    return args
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__="cpu" ) -> List[str]:
lowerCAmelCase = model_dict[model_name].from_pretrained(snake_case__ ).to(snake_case__ )
lowerCAmelCase = tokenizer_dict[model_name].from_pretrained(snake_case__ )
if model_name in ["facebook/bart-base"]:
lowerCAmelCase = 0
lowerCAmelCase = None
lowerCAmelCase = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = '''My friends are cool but they eat too many carbs.'''
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='''pt''').to(model.device)
        summary_ids = model.generate(
            inputs['''input_ids'''],
            attention_mask=inputs['''attention_mask'''],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )
        torch.onnx.export(
            bart_script_model,
            (
                inputs['''input_ids'''],
                inputs['''attention_mask'''],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''],
            output_names=['''output_ids'''],
            dynamic_axes={
                '''input_ids''': {0: '''batch''', 1: '''seq'''},
                '''output_ids''': {0: '''batch''', 1: '''seq_out'''},
            },
            example_outputs=summary_ids,
        )
        logger.info('''Model exported to {}'''.format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info('''Deduplicated and optimized model written to {}'''.format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                '''input_ids''': inputs['''input_ids'''].cpu().numpy(),
                '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(),
                '''num_beams''': np.array(num_beams),
                '''max_length''': np.array(max_length),
                '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id),
            },
        )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info('''Model outputs from torch and ONNX Runtime are similar.''')
        logger.info('''Success.''')
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path , device)
    if model.config.decoder_start_token_id is None:
        raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' )
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = '''BART.onnx'''
    logger.info('''Exporting model to ONNX''' )
    export_and_validate_model(model , tokenizer , output_name , num_beams , max_length)
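# Example invocation (illustrative; the script filename is assumed):
#
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#         --num_beams 4 --max_length 5 --output_file_path bart.onnx --device cpu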
if __name__ == "__main__":
main()
| 338 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
SPIECE_UNDERLINE = '''▁'''


class AlbertTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces
    def _convert_token_to_id(self, token) -> int:
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index) -> str:
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
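# Illustrative usage (loads the SentencePiece vocab listed in the map above):
#
#     tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
#     tokenizer.tokenize("Hello world!")  # e.g. ['▁hello', '▁world', '!']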
| 338 | 1 |
def dodecahedron_surface_area(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if edge <= 0 or not isinstance(edge, (int, float)):
        raise ValueError('Length must be a positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
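# Quick numeric check (illustrative, values rounded to two decimals):
#   dodecahedron_surface_area(2) ≈ 82.58  # 3 * sqrt(25 + 10*sqrt(5)) * 2**2
#   dodecahedron_volume(2)       ≈ 61.30  # (15 + 7*sqrt(5)) / 4 * 2**3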
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
def solution(n: int = 1000) -> int:
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
if __name__ == "__main__":
print(solution())
| 84 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """simple docstring"""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """simple docstring"""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
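if __name__ == "__main__":
    # Illustrative run: test_graph_1 has strongly connected components
    # {0, 1, 2}, {3} and {4}; the order of components and of vertices
    # within them depends on the DFS traversal order.
    print(strongly_connected_components(test_graph_1))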
| 142 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """simple docstring"""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """simple docstring"""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """simple docstring"""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """simple docstring"""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    """simple docstring"""
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """simple docstring"""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """simple docstring"""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    """simple docstring"""
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
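# Illustrative example (classic healthy/sick hidden Markov model; the
# probabilities are made up, but the decoded path below was checked by hand):
#
#     observations = ["normal", "cold", "dizzy"]
#     states = ["healthy", "sick"]
#     start_p = {"healthy": 0.6, "sick": 0.4}
#     trans_p = {"healthy": {"healthy": 0.7, "sick": 0.3},
#                "sick": {"healthy": 0.4, "sick": 0.6}}
#     emit_p = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#               "sick": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
#     viterbi(observations, states, start_p, trans_p, emit_p)
#     # -> ['healthy', 'healthy', 'sick']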
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 0 |
"""simple docstring"""
from __future__ import annotations
def all_unique(input_list: list) -> bool:
    return len(set(input_list)) == len(input_list)
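# e.g. all_unique([1, 2, 3]) is True; all_unique(["a", "a"]) is False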
if __name__ == "__main__":
import doctest
doctest.testmod()
| 268 | """simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
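# Illustrative usage (the processor is constructed directly here rather than
# loaded from a checkpoint; `pil_image` is assumed to be a PIL.Image.Image):
#
#     image_processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#     batch = image_processor(images=pil_image, return_tensors="np")
#     batch["pixel_values"][0].shape  # (3, 224, 224): resized, then center-cropped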
| 268 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = '''The small grey turtle was surprisingly fast when challenged.'''
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            F'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
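# Illustrative usage (hypothetical feature spec and path):
#
#     features = datasets.Features({"text": datasets.Value("string")})
#     ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
#
#     @get_duration
#     def iterate(ds):
#         for _ in ds:
#             pass
#
#     print(iterate(ds), "seconds")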
| 156 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
"""simple docstring"""
A__ : Union[str, Any] = OPTConfig
A__ : Optional[int] = {}
A__ : Optional[int] = '''gelu'''
def __init__( self : Tuple , _snake_case : Dict , _snake_case : List[str]=13 , _snake_case : Optional[Any]=7 , _snake_case : List[str]=True , _snake_case : Union[str, Any]=False , _snake_case : Union[str, Any]=99 , _snake_case : Dict=16 , _snake_case : Any=2 , _snake_case : Dict=4 , _snake_case : List[Any]=4 , _snake_case : Optional[int]="gelu" , _snake_case : List[str]=0.1 , _snake_case : List[str]=0.1 , _snake_case : List[Any]=20 , _snake_case : Any=2 , _snake_case : List[str]=1 , _snake_case : Tuple=0 , _snake_case : Dict=16 , _snake_case : Tuple=16 , ):
__lowercase : Dict = parent
__lowercase : str = batch_size
__lowercase : List[str] = seq_length
__lowercase : Optional[int] = is_training
__lowercase : Optional[int] = use_labels
__lowercase : Optional[int] = vocab_size
__lowercase : Optional[Any] = hidden_size
__lowercase : Dict = num_hidden_layers
__lowercase : List[Any] = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Dict = hidden_act
__lowercase : Any = hidden_dropout_prob
__lowercase : Tuple = attention_probs_dropout_prob
__lowercase : Union[str, Any] = max_position_embeddings
__lowercase : Any = eos_token_id
__lowercase : List[Any] = pad_token_id
__lowercase : Optional[int] = bos_token_id
__lowercase : List[str] = embed_dim
__lowercase : Any = word_embed_proj_dim
__lowercase : Optional[int] = False
def snake_case_ ( self : Optional[Any] ):
__lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__lowercase : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__lowercase : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 )
__lowercase : Any = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=_snake_case , **self.config_updates , )
__lowercase : Optional[int] = prepare_opt_inputs_dict(_snake_case , _snake_case )
return config, inputs_dict
def snake_case_ ( self : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Union[str, Any] ):
__lowercase : int = TFOPTModel(config=_snake_case )
__lowercase : Union[str, Any] = inputs_dict['''input_ids''']
__lowercase : Tuple = input_ids[:1, :]
__lowercase : Optional[Any] = inputs_dict['''attention_mask'''][:1, :]
__lowercase : Dict = 1
# first forward pass
__lowercase : Dict = model(_snake_case , attention_mask=_snake_case , use_cache=_snake_case )
__lowercase , __lowercase : int = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
__lowercase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
__lowercase : List[str] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
__lowercase : str = tf.concat([input_ids, next_tokens] , axis=-1 )
__lowercase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
__lowercase : Any = model(_snake_case , attention_mask=_snake_case )[0]
__lowercase : List[Any] = model(_snake_case , attention_mask=_snake_case , past_key_values=_snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
__lowercase : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
__lowercase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
__lowercase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_snake_case , _snake_case , rtol=1E-3 )
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
A__ : List[Any] = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
A__ : int = (TFOPTForCausalLM,) if is_tf_available() else ()
A__ : List[str] = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
A__ : Union[str, Any] = False
A__ : Optional[int] = False
A__ : int = False
A__ : List[str] = 1_0
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)
                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Optional[Any] = 9_9
def snake_case_ ( self : List[Any] ):
__lowercase : List[Any] = tf.ones((4, 1) , dtype=tf.intaa ) * 2
__lowercase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
__lowercase : List[str] = input_ids.shape[0]
__lowercase : int = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case_ ( self : Tuple ):
__lowercase : int = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
__lowercase : Optional[int] = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
__lowercase : int = tf.not_equal(_snake_case , model.config.pad_token_id )
with tf.GradientTape():
__lowercase : Optional[int] = model(input_ids=_snake_case , attention_mask=_snake_case ).last_hidden_state
__lowercase : Optional[Any] = (1, 11, 512)
self.assertEqual(output.shape , _snake_case )
__lowercase : Tuple = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=4E-3 ) )
__lowercase : Union[str, Any] = tf.function(_snake_case , jit_compile=_snake_case )
__lowercase : Dict = xla_generate(_snake_case , _snake_case )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , _snake_case , atol=4E-2 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self : Optional[Any] ):
super().setUp()
__lowercase : Tuple = '''facebook/opt-350m'''
def snake_case_ ( self : int ):
__lowercase : Any = TFOPTForCausalLM.from_pretrained(self.path_model )
__lowercase : Dict = GPTaTokenizer.from_pretrained(self.path_model )
__lowercase : Union[str, Any] = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
__lowercase : int = tokenizer(_snake_case , return_tensors='''tf''' , padding=_snake_case , add_special_tokens=_snake_case )
__lowercase : str = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
__lowercase : str = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-4 ) )
__lowercase : Any = tf.function(_snake_case , jit_compile=_snake_case )
__lowercase : List[str] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(_snake_case , _snake_case , atol=1E-4 ) )
@require_tf
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case_ ( self : str ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def snake_case_ ( self : List[Any] ):
__lowercase : Optional[Any] = '''facebook/opt-125m'''
__lowercase : int = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase : Dict = []
__lowercase : Optional[int] = GPTaTokenizer.from_pretrained(_snake_case )
__lowercase : List[str] = TFOPTForCausalLM.from_pretrained(_snake_case )
for prompt in self.prompts:
__lowercase : List[Any] = tokenizer(_snake_case , return_tensors='''tf''' ).input_ids
__lowercase : int = model.generate(_snake_case , max_length=10 )
__lowercase : Union[str, Any] = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
predicted_outputs += generated_string
self.assertListEqual(_snake_case , _snake_case )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : List[str] = '''facebook/opt-350m'''
__lowercase : List[Any] = GPTaTokenizer.from_pretrained(_snake_case )
__lowercase : Tuple = TFOPTForCausalLM.from_pretrained(_snake_case )
__lowercase : List[str] = '''left'''
# use different length sentences to test batching
__lowercase : str = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__lowercase : List[str] = tokenizer(_snake_case , return_tensors='''tf''' , padding=_snake_case )
__lowercase : Optional[Any] = inputs['''input_ids''']
__lowercase : List[str] = model.generate(input_ids=_snake_case , attention_mask=inputs['''attention_mask'''] )
__lowercase : List[str] = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
__lowercase : int = model.generate(input_ids=_snake_case )
__lowercase : Optional[Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
__lowercase : Optional[int] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
__lowercase : Optional[Any] = model.generate(input_ids=_snake_case , max_length=model.config.max_length - num_paddings )
__lowercase : str = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
__lowercase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_snake_case )
__lowercase : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=_snake_case )
__lowercase : List[Any] = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(_snake_case , _snake_case )
self.assertListEqual(_snake_case , [non_padded_sentence, padded_sentence] )
def snake_case_ ( self : Union[str, Any] ):
__lowercase : List[Any] = '''facebook/opt-350m'''
__lowercase : str = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
__lowercase : Union[str, Any] = []
__lowercase : List[str] = GPTaTokenizer.from_pretrained(_snake_case )
__lowercase : List[str] = TFOPTForCausalLM.from_pretrained(_snake_case )
for prompt in self.prompts:
__lowercase : Union[str, Any] = tokenizer(_snake_case , return_tensors='''tf''' ).input_ids
__lowercase : List[str] = model.generate(_snake_case , max_length=10 )
__lowercase : Tuple = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
predicted_outputs += generated_string
self.assertListEqual(_snake_case , _snake_case )
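
# Hedged standalone sketch (not in the original tests) of the generate-and-decode
# pattern exercised above. It uses the canonical transformers class names and
# needs network access, so it is left commented.
# from transformers import GPT2Tokenizer, TFOPTForCausalLM
# tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-125m")
# model = TFOPTForCausalLM.from_pretrained("facebook/opt-125m")
# ids = tokenizer("Paris is the capital of", return_tensors="tf").input_ids
# print(tokenizer.batch_decode(model.generate(ids, max_length=10), skip_special_tokens=True))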
| 156 | 1 |
"""simple docstring"""
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bs4_available, logging, requires_backends


if is_bs4_available():
    import bs4
    from bs4 import BeautifulSoup
logger = logging.get_logger(__name__)


class MarkupLMFeatureExtractor(FeatureExtractionMixin):
    """
    Feature extractor that pulls node texts and their xpaths out of an HTML string.
    """

    def __init__(self, **kwargs):
        requires_backends(self, ["bs4"])
        super().__init__(**kwargs)

    def xpath_soup(self, element):
        xpath_tags = []
        xpath_subscripts = []
        child = element if element.name else element.parent
        for parent in child.parents:  # type: bs4.element.Tag
            siblings = parent.find_all(child.name, recursive=False)
            xpath_tags.append(child.name)
            xpath_subscripts.append(
                0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child)
            )
            child = parent
        xpath_tags.reverse()
        xpath_subscripts.reverse()
        return xpath_tags, xpath_subscripts

    def get_three_from_single(self, html_string):
        html_code = BeautifulSoup(html_string, "html.parser")

        all_doc_strings = []
        string2xtag_seq = []
        string2xsubs_seq = []

        for element in html_code.descendants:
            if type(element) == bs4.element.NavigableString:
                if type(element.parent) != bs4.element.Tag:
                    continue

                text_in_this_tag = html.unescape(element).strip()
                if not text_in_this_tag:
                    continue

                all_doc_strings.append(text_in_this_tag)

                xpath_tags, xpath_subscripts = self.xpath_soup(element)
                string2xtag_seq.append(xpath_tags)
                string2xsubs_seq.append(xpath_subscripts)

        if len(all_doc_strings) != len(string2xtag_seq):
            raise ValueError("Number of doc strings and xtags does not correspond")
        if len(all_doc_strings) != len(string2xsubs_seq):
            raise ValueError("Number of doc strings and xsubs does not correspond")

        return all_doc_strings, string2xtag_seq, string2xsubs_seq

    def construct_xpath(self, xpath_tags, xpath_subscripts):
        xpath = ""
        for tagname, subs in zip(xpath_tags, xpath_subscripts):
            xpath += f"/{tagname}"
            if subs != 0:
                xpath += f"[{subs}]"
        return xpath

    def __call__(self, html_strings) -> BatchFeature:
        # Check that strings has a valid type
        valid_strings = False
        if isinstance(html_strings, str):
            valid_strings = True
        elif isinstance(html_strings, (list, tuple)):
            if len(html_strings) == 0 or isinstance(html_strings[0], str):
                valid_strings = True

        if not valid_strings:
            raise ValueError(
                "HTML strings must of type `str`, `List[str]` (batch of examples), "
                f"but is of type {type(html_strings)}."
            )

        is_batched = bool(isinstance(html_strings, (list, tuple)) and (isinstance(html_strings[0], str)))

        if not is_batched:
            html_strings = [html_strings]

        # Get nodes + xpaths
        nodes = []
        xpaths = []
        for html_string in html_strings:
            all_doc_strings, string2xtag_seq, string2xsubs_seq = self.get_three_from_single(html_string)
            nodes.append(all_doc_strings)

            xpath_strings = []
            for node, tag_list, sub_list in zip(all_doc_strings, string2xtag_seq, string2xsubs_seq):
                xpath_string = self.construct_xpath(tag_list, sub_list)
                xpath_strings.append(xpath_string)
            xpaths.append(xpath_strings)

        # return as Dict
        data = {"nodes": nodes, "xpaths": xpaths}
        encoded_inputs = BatchFeature(data=data, tensor_type=None)

        return encoded_inputs
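
# Hedged standalone sketch (not in the original file) of the xpath logic above,
# run on a tiny document with bs4 directly. Requires beautifulsoup4; names are
# illustrative.
def _demo_xpath() -> str:
    soup = BeautifulSoup("<html><body><p>a</p><p>b</p></body></html>", "html.parser")
    child = soup.find_all("p")[1]
    tags, subs = [], []
    for parent in child.parents:
        siblings = parent.find_all(child.name, recursive=False)
        tags.append(child.name)
        subs.append(0 if len(siblings) == 1 else next(i for i, s in enumerate(siblings, 1) if s is child))
        child = parent
    tags.reverse()
    subs.reverse()
    return "".join(f"/{t}" + (f"[{s}]" if s else "") for t, s in zip(tags, subs))  # -> "/html/body/p[2]"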
| 95 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 1 , result.stderr)
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '') , )
@require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr)
self.assertIn('success' , result.stdout.decode())
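
    def _offline_subprocess_demo(self) -> bool:
        # Hedged mini-sketch (not a real test): the core pattern above is
        # "run a child interpreter with TRANSFORMERS_OFFLINE=1 and check stdout".
        import os

        env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}
        result = subprocess.run([sys.executable, "-c", "print('success')"], env=env, capture_output=True)
        return b"success" in result.stdout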
| 95 | 1 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by the given validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 64 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Contains the full SHA-1 hashing pipeline for a bytestring."""

    def __init__(self, data):
        self.data = data
        # Initial hash state: five 32-bit words defined by the SHA-1 spec.
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotate the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pad the message so its bit length plus the length field fills 64-byte blocks."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Return the padded data as a list of 64-byte blocks."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Expand a 64-byte block into the 80-word SHA-1 message schedule."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        """Run the 80-round compression over every block and return the hex digest."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324
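

def test_sha1_abc_vector():
    # Hedged extra check (not in the original file): the canonical SHA-1 test
    # vector, pinning the implementation to a known digest.
    assert SHA1Hash(b"abc").final_hash() == "a9993e364706816aba3e25717850c26c9cd0d89d"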
def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 64 | 1 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
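
    # Hedged usage sketch (not part of the original tests): end-to-end use of
    # the processor with the checkpoint from setUp. Requires network access,
    # so it is left commented.
    # def demo(self):
    #     processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
    #     inputs = processor(text="the sound of a cat", audios=floats_list((1, 1000))[0], return_tensors="np")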
| 367 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix):
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
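

if __name__ == "__main__":
    # Hedged self-check (not in the original file): matrix @ inverse should be
    # close to the identity for the 2x2 branch.
    m = [[4.0, 7.0], [2.0, 6.0]]
    inv = inverse_of_matrix(m)
    product = [[sum(m[i][k] * inv[k][j] for k in range(2)) for j in range(2)] for i in range(2)]
    assert all(abs(product[i][j] - (i == j)) < 1e-9 for i in range(2) for j in range(2))
    print(inv)  # [[0.6, -0.7], [-0.2, 0.4]]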
| 97 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    # Denoised video frames, as a list of numpy arrays or a torch tensor.
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
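
# Hedged usage sketch (heavyweight model download, so left commented; the
# checkpoint name below is an assumption):
# from diffusers import TextToVideoSDPipeline
# pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
# frames = pipe("a panda playing guitar", num_inference_steps=25).frames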
| 237 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f"{dat:08b}"
                result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Decompress the given bit string with the Lempel-Ziv-Welch algorithm."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # Grow the code length: prefix every existing key with a "0".
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file as bytes, padding the last byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the length prefix that the compressor wrote before the payload."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result to the destination."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
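
# Hedged mini-example (not part of the original script): read_file_binary
# renders each byte as 8 bits, e.g.
#   "".join(f"{b:08b}" for b in b"AB") == "0100000101000010"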
| 170 | 0 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __UpperCamelCase :
def __init__( self :Tuple ,_UpperCamelCase :Tuple ,_UpperCamelCase :List[Any]=1_3 ,_UpperCamelCase :Optional[Any]=3_0 ,_UpperCamelCase :Tuple=2 ,_UpperCamelCase :List[Any]=3 ,_UpperCamelCase :int=True ,_UpperCamelCase :str=True ,_UpperCamelCase :Optional[Any]=3_2 ,_UpperCamelCase :Optional[int]=2 ,_UpperCamelCase :Union[str, Any]=4 ,_UpperCamelCase :List[Any]=3_7 ,_UpperCamelCase :List[str]="gelu" ,_UpperCamelCase :int=0.1 ,_UpperCamelCase :List[Any]=0.1 ,_UpperCamelCase :List[Any]=1_0 ,_UpperCamelCase :Optional[int]=0.02 ,_UpperCamelCase :Any=3 ,_UpperCamelCase :Union[str, Any]=0.6 ,_UpperCamelCase :Optional[Any]=None ,):
snake_case_ : Optional[int] = parent
snake_case_ : List[Any] = batch_size
snake_case_ : Tuple = image_size
snake_case_ : int = patch_size
snake_case_ : List[Any] = num_channels
snake_case_ : int = is_training
snake_case_ : List[Any] = use_labels
snake_case_ : List[str] = hidden_size
snake_case_ : Any = num_hidden_layers
snake_case_ : Dict = num_attention_heads
snake_case_ : List[str] = intermediate_size
snake_case_ : Any = hidden_act
snake_case_ : int = hidden_dropout_prob
snake_case_ : Union[str, Any] = attention_probs_dropout_prob
snake_case_ : List[Any] = type_sequence_label_size
snake_case_ : Union[str, Any] = initializer_range
snake_case_ : Optional[Any] = mask_ratio
snake_case_ : Dict = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case_ : Dict = (image_size // patch_size) ** 2
snake_case_ : List[Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
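        # e.g. with image_size=30, patch_size=2, mask_ratio=0.6 (the defaults above):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * 226) = 91,
        # i.e. 91 tokens survive masking.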
def a__ ( self :str ):
snake_case_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Any = None
if self.use_labels:
snake_case_ : Any = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case_ : Dict = self.get_config()
return config, pixel_values, labels
def a__ ( self :Union[str, Any] ):
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCamelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def a__ ( self :Tuple ,_UpperCamelCase :List[str] ,_UpperCamelCase :Any ,_UpperCamelCase :Dict ):
snake_case_ : Optional[Any] = TFViTMAEModel(config=_UpperCamelCase )
snake_case_ : Tuple = model(_UpperCamelCase ,training=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self :Any ,_UpperCamelCase :str ,_UpperCamelCase :Optional[Any] ,_UpperCamelCase :Dict ):
snake_case_ : str = TFViTMAEForPreTraining(_UpperCamelCase )
snake_case_ : List[Any] = model(_UpperCamelCase ,training=_UpperCamelCase )
# expected sequence length = num_patches
snake_case_ : List[Any] = (self.image_size // self.patch_size) ** 2
snake_case_ : Union[str, Any] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case_ : Optional[Any] = 1
snake_case_ : int = TFViTMAEForPreTraining(_UpperCamelCase )
snake_case_ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ : Union[str, Any] = model(_UpperCamelCase ,training=_UpperCamelCase )
snake_case_ : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def a__ ( self :int ):
snake_case_ : Tuple = self.prepare_config_and_inputs()
((snake_case_) , (snake_case_) , (snake_case_)) : Dict = config_and_inputs
snake_case_ : Tuple = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
lowercase : Optional[int] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase : int = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase : Union[str, Any] = False
lowercase : Tuple = False
lowercase : Optional[int] = False
lowercase : List[str] = False
def a__ ( self :List[str] ):
snake_case_ : Dict = TFViTMAEModelTester(self )
snake_case_ : List[str] = ConfigTester(self ,config_class=_UpperCamelCase ,has_text_modality=_UpperCamelCase ,hidden_size=3_7 )
def a__ ( self :Optional[int] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def a__ ( self :List[str] ):
pass
def a__ ( self :Union[str, Any] ):
snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Union[str, Any] = model_class(_UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) )
snake_case_ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCamelCase ,tf.keras.layers.Layer ) )
def a__ ( self :Tuple ):
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Any = model_class(_UpperCamelCase )
snake_case_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : List[Any] = [*signature.parameters.keys()]
snake_case_ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_UpperCamelCase )
def a__ ( self :Any ):
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def a__ ( self :int ):
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_UpperCamelCase )
def a__ ( self :List[Any] ):
# make the mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Union[str, Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : Optional[int] = model_class(_UpperCamelCase )
snake_case_ : int = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Any = model(_UpperCamelCase ,noise=_UpperCamelCase )
snake_case_ : List[Any] = copy.deepcopy(self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase ) )
snake_case_ : Dict = model(**_UpperCamelCase ,noise=_UpperCamelCase )
snake_case_ : Optional[Any] = outputs_dict[0].numpy()
snake_case_ : Dict = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1E-6 )
def a__ ( self :Optional[int] ):
# make the mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : int = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : int = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(_UpperCamelCase :Tuple ):
snake_case_ : Union[str, Any] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(_UpperCamelCase ):
snake_case_ : Any = v.numpy()
else:
snake_case_ : int = np.array(_UpperCamelCase )
return inputs_np_dict
for model_class in self.all_model_classes:
snake_case_ : List[str] = model_class(_UpperCamelCase )
snake_case_ : Tuple = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : str = prepare_numpy_arrays(_UpperCamelCase )
snake_case_ : Dict = model(_UpperCamelCase ,noise=_UpperCamelCase )
snake_case_ : int = model(**_UpperCamelCase ,noise=_UpperCamelCase )
self.assert_outputs_same(_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :List[Any] ,_UpperCamelCase :List[Any] ,_UpperCamelCase :int ,_UpperCamelCase :Optional[Any] ):
# make masks reproducible
np.random.seed(2 )
snake_case_ : int = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
snake_case_ : Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : int = tf.constant(_UpperCamelCase )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case_ : Dict = tf_noise
super().check_pt_tf_models(_UpperCamelCase ,_UpperCamelCase ,_UpperCamelCase )
def a__ ( self :Dict ):
# make mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Optional[int] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(_UpperCamelCase )
if module_member_name.endswith("""MainLayer""" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("""MainLayer""" )] == model_class.__name__[: -len("""Model""" )]
for module_member in (getattr(_UpperCamelCase ,_UpperCamelCase ),)
if isinstance(_UpperCamelCase ,_UpperCamelCase )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(_UpperCamelCase ,"""_keras_serializable""" ,_UpperCamelCase )
}
snake_case_ : Optional[int] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case_ : Any = tf.convert_to_tensor(_UpperCamelCase )
inputs_dict.update({"""noise""": noise} )
for main_layer_class in tf_main_layer_classes:
snake_case_ : Dict = main_layer_class(_UpperCamelCase )
snake_case_ : Union[str, Any] = {
name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
snake_case_ : Any = tf.keras.Model(_UpperCamelCase ,outputs=main_layer(_UpperCamelCase ) )
snake_case_ : List[Any] = model(_UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case_ : Optional[int] = os.path.join(_UpperCamelCase ,"""keras_model.h5""" )
model.save(_UpperCamelCase )
snake_case_ : Optional[int] = tf.keras.models.load_model(
_UpperCamelCase ,custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(_UpperCamelCase ,tf.keras.Model )
snake_case_ : Tuple = model(_UpperCamelCase )
self.assert_outputs_same(_UpperCamelCase ,_UpperCamelCase )
@slow
def a__ ( self :Dict ):
# make mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : List[Any] = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : int = model_class(_UpperCamelCase )
snake_case_ : List[Any] = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : Any = model(_UpperCamelCase ,noise=_UpperCamelCase )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : List[Any] = outputs.last_hidden_state.numpy()
snake_case_ : Optional[Any] = 0
else:
snake_case_ : int = outputs.logits.numpy()
snake_case_ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCamelCase ,saved_model=_UpperCamelCase )
snake_case_ : int = model_class.from_pretrained(_UpperCamelCase )
snake_case_ : Union[str, Any] = model(_UpperCamelCase ,noise=_UpperCamelCase )
if model_class.__name__ == "TFViTMAEModel":
snake_case_ : Optional[int] = after_outputs["""last_hidden_state"""].numpy()
snake_case_ : List[Any] = 0
else:
snake_case_ : Any = after_outputs["""logits"""].numpy()
snake_case_ : Any = 0
snake_case_ : Optional[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_UpperCamelCase ,1E-5 )
def a__ ( self :Union[str, Any] ):
# make mask reproducible
np.random.seed(2 )
snake_case_ , snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : Tuple = int((config.image_size // config.patch_size) ** 2 )
snake_case_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
snake_case_ : str = model_class(_UpperCamelCase )
snake_case_ : Dict = self._prepare_for_class(_UpperCamelCase ,_UpperCamelCase )
snake_case_ : List[Any] = model(_UpperCamelCase ,noise=_UpperCamelCase )
snake_case_ : List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(_UpperCamelCase )
snake_case_ : Optional[Any] = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
snake_case_ : str = model_class.from_config(model.config )
snake_case_ : List[str] = new_model(_UpperCamelCase ) # Build model
new_model.set_weights(model.get_weights() )
snake_case_ : Optional[Any] = new_model(_UpperCamelCase ,noise=_UpperCamelCase )
self.assert_outputs_same(_UpperCamelCase ,_UpperCamelCase )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def a__ ( self :Tuple ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def a__ ( self :Dict ):
pass
@slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None
    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)
        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        outputs = model(**inputs, noise=noise)
        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4) | 8 |
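# Illustrative sketch (not from the original test file): the H5 save/reload round-trip
# these Keras tests rely on, using only TensorFlow and NumPy. The toy model and the
# 1e-5 tolerance are illustrative choices, not values from the test suite.
import os
import tempfile
import numpy as np
import tensorflow as tf
toy_model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
inputs = tf.random.uniform((2, 8))
before = toy_model(inputs).numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
    filepath = os.path.join(tmpdirname, "keras_model.h5")
    toy_model.save(filepath)
    restored = tf.keras.models.load_model(filepath)
after = restored(inputs).numpy()
assert np.amax(np.abs(before - after)) <= 1e-5  # weights round-trip exactly in practice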
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9 | 8 | 1 |
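# Illustrative sketch (not from the original test file): the peak-VRAM measurement
# pattern the offloading test above relies on; `run_pipeline` is a stand-in for any
# callable that exercises the pipeline on a CUDA device.
import torch
def measure_peak_vram(run_pipeline) -> int:
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    run_pipeline()
    return torch.cuda.max_memory_allocated()  # bytes at the high-water mark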
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 |
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
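# Illustrative sketch (not from either original file): both __init__ snippets above
# guard optional backends with the same try/except shape. This assumes
# `transformers.utils` exposes the two helpers below, which it does in recent releases.
from transformers.utils import OptionalDependencyNotAvailable, is_torch_available
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch-backed objects are replaced by dummy placeholders elsewhere
else:
    print("torch backend available; model classes can be exposed")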
| 42 | 1 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode, use_xla):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)
        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.')
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
def random_input_ids(batch_size, sequence_length, vocab_size):
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
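# Illustrative usage sketch (not from the original module) for the decorator factory
# above, assuming TF is available; the toy function is only defined, never called,
# so nothing heavy runs at import time.
@run_with_tf_optimizations(do_eager_mode=True, use_xla=False)
def _toy_add_one(x):
    return x + 1
# _toy_add_one(tf.constant([1, 2, 3])) would run eagerly and return [2, 3, 4]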
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version(self):
        return tf.__version__
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> List[str]:
SCREAMING_SNAKE_CASE = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
SCREAMING_SNAKE_CASE = self._prepare_inference_func(a_ , a_ , a_)
return self._measure_speed(_inference)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Tuple:
SCREAMING_SNAKE_CASE = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
SCREAMING_SNAKE_CASE = self._prepare_train_func(a_ , a_ , a_)
return self._measure_speed(_train)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Optional[Any]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a_)
SCREAMING_SNAKE_CASE = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
SCREAMING_SNAKE_CASE = self._prepare_inference_func(a_ , a_ , a_)
return self._measure_memory(_inference)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> Optional[int]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , a_)
SCREAMING_SNAKE_CASE = self.args.strategy
if strategy is None:
raise ValueError('A device strategy has to be initialized before using TensorFlow.')
SCREAMING_SNAKE_CASE = self._prepare_train_func(a_ , a_ , a_)
return self._measure_memory(_train)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> List[Any]:
SCREAMING_SNAKE_CASE = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.')
SCREAMING_SNAKE_CASE = (
hasattr(a_ , 'architectures')
and isinstance(config.architectures , a_)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE = __import__('transformers' , fromlist=[model_class])
SCREAMING_SNAKE_CASE = getattr(a_ , a_)
SCREAMING_SNAKE_CASE = model_cls(a_)
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
SCREAMING_SNAKE_CASE = TF_MODEL_MAPPING[config.__class__](a_)
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE = config.vocab_size if hasattr(a_ , 'vocab_size') else config.encoder.vocab_size
SCREAMING_SNAKE_CASE = random_input_ids(a_ , a_ , a_)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_forward():
return model(a_ , decoder_input_ids=a_ , training=a_)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_forward():
return model(a_ , training=a_)
SCREAMING_SNAKE_CASE = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def SCREAMING_SNAKE_CASE__ ( self , a , a , a) -> str:
SCREAMING_SNAKE_CASE = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.')
if self.args.fpaa:
raise NotImplementedError('Mixed precision is currently not supported.')
SCREAMING_SNAKE_CASE = (
hasattr(a_ , 'architectures')
and isinstance(config.architectures , a_)
and len(config.architectures) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
SCREAMING_SNAKE_CASE = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
SCREAMING_SNAKE_CASE = __import__('transformers' , fromlist=[model_class])
SCREAMING_SNAKE_CASE = getattr(a_ , a_)
SCREAMING_SNAKE_CASE = model_cls(a_)
except ImportError:
raise ImportError(
f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
' set `--only_pretrain_model` or `args.only_pretrain_model=True`.')
else:
SCREAMING_SNAKE_CASE = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](a_)
# encoder-decoder has vocab size saved differently
SCREAMING_SNAKE_CASE = config.vocab_size if hasattr(a_ , 'vocab_size') else config.encoder.vocab_size
SCREAMING_SNAKE_CASE = random_input_ids(a_ , a_ , a_)
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_decoder_train():
SCREAMING_SNAKE_CASE = model(a_ , decoder_input_ids=a_ , labels=a_ , training=a_)[0]
SCREAMING_SNAKE_CASE = tf.gradients(a_ , model.trainable_variables)
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla)
def encoder_train():
SCREAMING_SNAKE_CASE = model(a_ , labels=a_ , training=a_)[0]
SCREAMING_SNAKE_CASE = tf.gradients(a_ , model.trainable_variables)
return gradients
SCREAMING_SNAKE_CASE = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def SCREAMING_SNAKE_CASE__ ( self , a) -> Tuple:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
# run additional 10 times to stabilize compilation for tpu
logger.info('Do inference on TPU. Running model 5 times to stabilize compilation')
timeit.repeat(a_ , repeat=1 , number=5)
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
SCREAMING_SNAKE_CASE = timeit.repeat(
a_ , repeat=self.args.repeat , number=10 , )
return min(a_) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''')
def SCREAMING_SNAKE_CASE__ ( self , a) -> Any:
logger.info(
'Note that TensorFlow allocates more memory than '
'it might need to speed up computation. '
'The memory reported here corresponds to the memory '
'reported by `nvidia-smi`, which can vary depending '
'on total available memory on the GPU that is used.')
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
' consumption line by line.')
SCREAMING_SNAKE_CASE = start_memory_tracing('transformers')
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
' with `args.memory=False`')
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'py3nvml not installed, we won\'t log GPU memory usage. '
'Install py3nvml (pip install py3nvml) to log information about GPU.')
SCREAMING_SNAKE_CASE = '''N/A'''
else:
logger.info(
'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
' running on the same GPU.')
# init nvml
nvml.nvmlInit()
func()
SCREAMING_SNAKE_CASE = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
SCREAMING_SNAKE_CASE = nvml.nvmlDeviceGetMemoryInfo(a_)
SCREAMING_SNAKE_CASE = meminfo.used
SCREAMING_SNAKE_CASE = Memory(a_)
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
' TensorFlow.')
SCREAMING_SNAKE_CASE = None
else:
SCREAMING_SNAKE_CASE = measure_peak_memory_cpu(a_)
SCREAMING_SNAKE_CASE = Memory(a_) if isinstance(a_ , a_) else memory_bytes
if self.args.trace_memory_line_by_line:
SCREAMING_SNAKE_CASE = stop_memory_tracing(a_)
if memory is None:
SCREAMING_SNAKE_CASE = summary.total
else:
SCREAMING_SNAKE_CASE = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f'''Doesn\'t fit on GPU. {e}''')
return "N/A", None
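# Illustrative sketch (not from the original module): the timing convention
# `_measure_speed` follows. Per the timeit docs, take the minimum over repeats rather
# than the mean, then divide by the inner iteration count for a per-call figure.
# The workload below is an arbitrary stand-in.
import timeit
runtimes = timeit.repeat(lambda: sum(range(10_000)), repeat=5, number=10)
print(f"best-of-5 average per call: {min(runtimes) / 10.0:.6f}s")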
| 352 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    """
    Zeller's congruence: find the day of the week for nearly any Gregorian date.

    >>> zeller('01-31-2010')
    'Your date 01-31-2010, is a Sunday!'
    """
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")
    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
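# Illustrative cross-check (not from the original script): compare zeller() against
# the standard library for a few dates; define-only, call manually if desired.
def _cross_check() -> None:
    for date_str in ["01-31-2010", "02-01-2011", "11-26-2024"]:
        m, d, y = (int(part) for part in date_str.replace("/", "-").split("-"))
        expected = datetime.date(y, m, d).strftime("%A")
        assert expected in zeller(date_str), (date_str, expected)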
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
        description=(
            'Find out what day of the week nearly any date is or was. Enter '
            'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
        )
    )
    parser.add_argument(
        'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
    )
    args = parser.parse_args()
    zeller(args.date_input)
| 327 | 0 |
import argparse
import os
import re
PATH_TO_TRANSFORMERS = "src/transformers"
# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r"\[([^\]]+)\]")
def get_indent(line):
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None ):
A_ : str = 0
A_ : Optional[Any] = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE ):
index += 1
A_ : Tuple = ['''\n'''.join(lines[:index] )]
else:
A_ : Dict = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
A_ : Optional[int] = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(SCREAMING_SNAKE_CASE ) )
if index < len(SCREAMING_SNAKE_CASE ) - 1:
A_ : List[str] = [lines[index + 1]]
index += 1
else:
A_ : Union[str, Any] = []
else:
blocks.append('''\n'''.join(SCREAMING_SNAKE_CASE ) )
A_ : Union[str, Any] = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE ) > 0:
blocks.append('''\n'''.join(SCREAMING_SNAKE_CASE ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def ignore_underscore(key):
    def _inner(x):
        return key(x).lower().replace("_", "")
    return _inner
def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x
    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
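# Illustrative example (not from the original utility) of the rule above, with
# invented names:
#   sort_objects(["load_data", "DATA_DIR", "_helper", "DataLoader"])
#   -> ['DATA_DIR', 'DataLoader', '_helper', 'load_data']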
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
# This inner function sort imports between [ ].
def _replace(SCREAMING_SNAKE_CASE ):
A_ : List[Any] = match.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
A_ : Optional[Any] = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
A_ : List[str] = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(SCREAMING_SNAKE_CASE )] ) + "]"
A_ : Dict = import_statement.split('''\n''' )
if len(SCREAMING_SNAKE_CASE ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
A_ : Dict = 2 if lines[1].strip() == '''[''' else 1
A_ : Tuple = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
A_ : Tuple = sort_objects(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )
A_ : str = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
A_ : Tuple = _re_bracket_content.sub(_replace , lines[1] )
else:
A_ : Optional[Any] = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
A_ : str = keys[:-1]
A_ : Dict = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(SCREAMING_SNAKE_CASE )] )
return "\n".join(SCREAMING_SNAKE_CASE )
else:
# Finally we have to deal with imports fitting on one line
A_ : List[Any] = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE )
return import_statement
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=True ):
with open(SCREAMING_SNAKE_CASE , encoding='''utf-8''' ) as f:
A_ : Any = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
A_ : int = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
A_ : Optional[Any] = main_blocks[block_idx]
A_ : Any = block.split('''\n''' )
# Get to the start of the imports.
A_ : Any = 0
while line_idx < len(SCREAMING_SNAKE_CASE ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
A_ : str = len(SCREAMING_SNAKE_CASE )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE ):
continue
# Ignore beginning and last line: they don't contain anything.
A_ : List[str] = '''\n'''.join(block_lines[line_idx:-1] )
A_ : Any = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
A_ : Union[str, Any] = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE , indent_level=SCREAMING_SNAKE_CASE )
# We have two categories of import key: list or _import_structure[key].append/extend
A_ : Dict = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
A_ : Union[str, Any] = [(pattern.search(SCREAMING_SNAKE_CASE ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
A_ : List[Any] = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE ) if key is not None]
A_ : List[Any] = [x[0] for x in sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
A_ : Tuple = 0
A_ : Union[str, Any] = []
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if keys[i] is None:
reorderded_blocks.append(internal_blocks[i] )
else:
A_ : List[Any] = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reorderded_blocks.append(SCREAMING_SNAKE_CASE )
count += 1
# And we put our main block back together with its first and last line.
A_ : Union[str, Any] = '''\n'''.join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(SCREAMING_SNAKE_CASE , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(SCREAMING_SNAKE_CASE ) )
def sort_imports_in_all_inits(check_only=True):
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
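# Added usage note (not from the original utility); the script path below is
# hypothetical since the file's location depends on the repository layout:
#   python utils/custom_init_isort.py --check_only   # fail if any __init__ needs sorting
#   python utils/custom_init_isort.py                # rewrite offending files in place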
| 186 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
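# Illustrative NumPy sketch (not from the original processor) of the numeric steps
# the class above applies after resizing and cropping; the mean/std values are the
# OPENAI_CLIP_MEAN/STD constants imported at the top, and the input is random data.
import numpy as np
clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])
clip_std = np.array([0.26862954, 0.26130258, 0.27577711])
pixels = np.random.randint(0, 256, (224, 224, 3)).astype(np.float32)
pixels = pixels * (1 / 255)               # do_rescale with rescale_factor=1/255
pixels = (pixels - clip_mean) / clip_std  # do_normalize, broadcast over channels
print(pixels.shape)                       # (224, 224, 3)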
| 186 | 1 |
"""simple docstring"""
def longest_distance(graph):
    indegree = [0] * len(graph)
    queue = []
    long_dist = [1] * len(graph)
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x)
    print(max(long_dist))
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
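# Added note (not from the original script): for the adjacency list above, the call
# prints 5; the longest chain of vertices is 0 -> 2 -> 5 -> 6 -> 7, and distances
# count vertices because long_dist starts at 1 for every node.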
| 354 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=False , _a=True , _a="None" , _a=3 , _a=4 , _a=None , ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : str = seq_length
SCREAMING_SNAKE_CASE__ : str = is_training
SCREAMING_SNAKE_CASE__ : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE__ : str = use_token_type_ids
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : List[str] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : str = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Any = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[str] = type_vocab_size
SCREAMING_SNAKE_CASE__ : Dict = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = num_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = num_choices
SCREAMING_SNAKE_CASE__ : List[str] = relative_attention
SCREAMING_SNAKE_CASE__ : str = position_biased_input
SCREAMING_SNAKE_CASE__ : List[str] = pos_att_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scope
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : str = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> Tuple:
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_config()
SCREAMING_SNAKE_CASE__ : Any = 300
return config
def _a ( self , _a ) -> List[str]:
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = DebertaModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(_a , attention_mask=_a , token_type_ids=_a )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a , token_type_ids=_a )[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForMaskedLM(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Tuple = DebertaForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : Any = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_a )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForTokenClassification(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , _a , _a , _a , _a , _a , _a , _a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = DebertaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)
def _a ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_a )
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_a )
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_a )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_a )
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_a )
@slow
def _a ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Dict = DebertaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@unittest.skip(reason="""Model not available yet""" )
def _a ( self ) -> Any:
"""simple docstring"""
pass
@slow
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = DebertaModel.from_pretrained("""microsoft/deberta-base""" )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_a , attention_mask=_a )[0]
# compare the actual values for a slice.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) , f'''{output[:, 1:4, 1:4]}''' )
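# Illustrative sketch (not from the original test file): the integration test above
# hinges on torch.allclose with an absolute tolerance; a tiny standalone example of
# what atol=1e-4 accepts, with invented values.
import torch
a = torch.tensor([0.1000, 0.2000])
b = torch.tensor([0.10004, 0.19996])
print(torch.allclose(a, b, atol=1e-4))  # True: every |a - b| is within 1e-4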
| 56 | 0 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
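# Added example (not from the original script), with n shrunk to 3 so the word
# windows are visible:
#   split_text("a b c d e f g", n=3) -> ['a b c', 'd e f', 'g']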
def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments"):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"
    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)
    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )
    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset
    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)
    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
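# Illustrative sketch (not from the original script) of the HNSW construction used in
# step 2, wrapped in a helper so nothing runs as part of the CLI; d=768 and m=128
# mirror the IndexHnswArguments defaults below.
def _hnsw_sketch():
    import numpy as np
    d, m = 768, 128
    index = faiss.IndexHNSWFlat(d, m, faiss.METRIC_INNER_PRODUCT)
    vectors = np.random.rand(100, d).astype("float32")
    faiss.normalize_L2(vectors)  # inner product on unit vectors == cosine similarity
    index.add(vectors)
    _, ids = index.search(vectors[:1], 5)
    return ids[0]  # the five nearest neighbours of the first vector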
@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowercase : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowercase , lowercase , lowercase : Optional[Any] = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowercase : Any = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
| 99 |
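# A minimal reload sketch for the artifacts written above (paths are
# hypothetical and mirror the commented hints in the script; they assume the
# script was run with a persistent `output_dir` rather than the temp dir):
from datasets import load_from_disk

passages_path = "my_output_dir/my_knowledge_dataset"
index_path = "my_output_dir/my_knowledge_dataset_hnsw_index.faiss"

dataset = load_from_disk(passages_path)             # reload the passages
dataset.load_faiss_index("embeddings", index_path)  # reattach the HNSW index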
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
    """
    Calculate the resonant frequency of an LC circuit: f0 = 1 / (2 * pi * sqrt(L * C)).
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod() | 8 | 0 |
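# Quick sanity check of the formula above, f0 = 1 / (2 * pi * sqrt(L * C)):
# a 10 mH inductor with a 100 nF capacitor resonates near 5.03 kHz.
# >>> resonant_frequency(inductance=10e-3, capacitance=100e-9)
# ('Resonant frequency', 5032.92...)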
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250_880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 364 |
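# Usage sketch for the two classes above (mirrors how transformers wires ONNX
# export configs; `use_past=False` keeps the dummy inputs simple):
config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
onnx_config = BloomOnnxConfig(config)
print(onnx_config.inputs)      # dynamic axes for input_ids / attention_mask
print(onnx_config.num_layers)  # 2, read back through the wrapped config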
'''simple docstring'''
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Every row and every column must be sorted in non-increasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 43 | 0 |
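# Worked example on the first test grid (rows and columns are non-increasing):
# it contains exactly 8 negative numbers, and all three strategies agree.
example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
validate_grid(example)
assert count_negatives_binary_search(example) == 8
assert count_negatives_brute_force(example) == 8
assert count_negatives_brute_force_with_break(example) == 8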
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
_DESCRIPTION = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metric is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
_KWARGS_DESCRIPTION = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 155 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
a = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 155 | 1 |
'''simple docstring'''
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 360 |
'''simple docstring'''
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """
    Solves the system a1*x + b1*y = c1 and a2*x + b2*y = c2 via Cramer's rule.
    Each equation is given as the coefficient list [a, b, c].
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 5 | 0 |
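# Worked example: 11x + 2y = 30 and 1x + 0y = 4 give D = -2, Dx = -8, Dy = 14,
# hence x = 4.0 and y = -7.0.
assert cramers_rule_2x2([11, 2, 30], [1, 0, 4]) == (4.0, -7.0)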
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 105 |
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start, visited, sort):
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort) | 233 | 0 |
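# Note on the run above: each vertex is appended only after its children, so
# the printed order for the example graph is ['c', 'd', 'e', 'b', 'a'] (a
# reversed topological ordering of the five vertices).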
"""simple docstring"""
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
UpperCAmelCase__ = logging.getLogger(__name__)
def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
parser.add_argument(
'''--dataset_name''' , type=a__ , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
parser.add_argument(
'''--dataset_config''' , type=a__ , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
parser.add_argument(
'''--tokenizer_name_or_path''' , type=a__ , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
parser.add_argument(
'''--shard_size''' , type=a__ , default=1_000 , help='''Number of entries to go in a single shard.''' , )
parser.add_argument('''--split''' , type=a__ , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
parser.add_argument(
'''--limit''' , default=a__ , type=a__ , help='''Limit the number of shards (used for debugging).''' , )
parser.add_argument(
'''--max_length''' , type=a__ , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
''' sequence length that is a multiple of 8.''' , )
parser.add_argument(
'''--output_dir''' , default='''tf-tpu''' , type=a__ , help='''Output directory where the TFRecord shards will be saved. If the'''
''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    args = parser.parse_args()
return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1_000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
UpperCAmelCase__ = parse_args()
main(args)
| 368 |
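# A hypothetical invocation of the script above (the file name is assumed;
# every flag below is defined in parse_args):
#
#   python prepare_tfrecord_shards.py \
#       --dataset_name wikitext --dataset_config wikitext-103-raw-v1 \
#       --split train --shard_size 1000 --max_length 512 --output_dir tf-tpu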
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 0 |
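# With the _LazyModule wiring above, the heavy torch/TF submodules are only
# imported on first attribute access, e.g. (sketch):
#
#   from transformers import DebertaConfig, DebertaModel  # resolved lazily
#   model = DebertaModel(DebertaConfig())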
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 56 |
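# Usage sketch: with the defaults above, the derived fields computed at the
# end of __init__ come out as follows.
config = MaskFormerSwinConfig()
print(config.hidden_size)   # 768 = 96 * 2 ** (4 - 1)
print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']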
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 94 | 0 |
"""simple docstring"""
SCREAMING_SNAKE_CASE = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
SCREAMING_SNAKE_CASE = [{"type": "code", "content": INSTALL_CONTENT}]
SCREAMING_SNAKE_CASE = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 230 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int:
if "encoder.model" in name:
A__ = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
A__ = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
A__ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
A__ = "encoder." + name
if "attn.proj" in name:
A__ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
A__ = name.replace("attn" , "attention.self" )
if "norm1" in name:
A__ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
A__ = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
A__ = "encoder.layernorm.bias"
return name
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> Any:
for key in orig_state_dict.copy().keys():
A__ = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
A__ = key.split("." )
A__ = int(key_split[3] )
A__ = int(key_split[5] )
A__ = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A__ = val[:dim, :]
A__ = val[dim : dim * 2, :]
A__ = val[-dim:, :]
else:
A__ = val[:dim]
A__ = val[dim : dim * 2]
A__ = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
A__ = val
return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    # load original model
A__ = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
A__, A__ = get_configs(lowercase_ )
A__ = DonutSwinModel(lowercase_ )
A__ = MBartForCausalLM(lowercase_ )
A__ = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
A__ = original_model.state_dict()
A__ = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
A__ = load_dataset("hf-internal-testing/example-documents" )
A__ = dataset["test"][0]["image"].convert("RGB" )
A__ = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
A__ = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
A__ = DonutProcessor(lowercase_ , lowercase_ )
A__ = processor(lowercase_ , return_tensors="pt" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
A__ = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
A__ = "When is the coffee break?"
A__ = task_prompt.replace("{user_input}" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
A__ = "<s_rvlcdip>"
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
A__ = "<s_cord>"
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
A__ = "s_cord-v2>"
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
A__ = "<s_zhtrainticket>"
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
A__ = "hello world"
else:
raise ValueError("Model name not supported" )
A__ = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="pt" )[
"input_ids"
]
A__ = original_model.encoder.model.patch_embed(lowercase_ )
A__, A__ = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
A__ = original_model.encoder(lowercase_ )
A__ = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
A__ = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
A__ = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 230 | 1 |
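# A hypothetical command line for the conversion script above (the script name
# is assumed; the flags match the argparse definitions):
#
#   python convert_donut_to_pytorch.py \
#       --model_name naver-clova-ix/donut-base-finetuned-docvqa \
#       --pytorch_dump_folder_path ./donut-docvqa --push_to_hub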
"""simple docstring"""
def solution(limit=28123):
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res


if __name__ == "__main__":
    print(solution())
| 78 |
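# Background note (Project Euler 23): a number is abundant when its proper
# divisors sum past it, e.g. 12 < 1 + 2 + 3 + 4 + 6 = 16. solution() sums all
# integers that cannot be written as the sum of two abundant numbers; with the
# default limit of 28123 it returns 4179871.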
"""simple docstring"""
def _lowerCAmelCase ( lowercase_ , lowercase_ = " " ):
UpperCAmelCase = []
UpperCAmelCase = 0
for index, char in enumerate(lowercase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase = index + 1
elif index + 1 == len(lowercase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
if __name__ == "__main__":
from doctest import testmod
testmod()
| 78 | 1 |
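# Usage sketch mirroring the doctests this helper usually ships with:
assert split("apple#banana#cherry#orange", separator="#") == ["apple", "banana", "cherry", "orange"]
assert split("Hello there") == ["Hello", "there"]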
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_falcon'] = [
        'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FalconForCausalLM',
        'FalconModel',
        'FalconPreTrainedModel',
        'FalconForSequenceClassification',
        'FalconForTokenClassification',
        'FalconForQuestionAnswering',
    ]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 53 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a__ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, unittest.TestCase ):
_lowerCamelCase = StableDiffusionInstructPixaPixPipeline
_lowerCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
_lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowercase ( self : str ) -> str:
torch.manual_seed(0 )
lowercase : int = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
lowercase : int = PNDMScheduler(skip_prk_steps=lowerCAmelCase )
torch.manual_seed(0 )
lowercase : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
torch.manual_seed(0 )
lowercase : Union[str, Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
lowercase : Optional[int] = CLIPTextModel(lowerCAmelCase )
lowercase : str = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase : Union[str, Any] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowercase ( self : int, lowerCAmelCase : str, lowerCAmelCase : Tuple=0 ) -> Tuple:
lowercase : Optional[int] = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
lowercase : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowercase : Any = Image.fromarray(np.uinta(lowerCAmelCase ) ).convert('RGB' )
if str(lowerCAmelCase ).startswith('mps' ):
lowercase : str = torch.manual_seed(lowerCAmelCase )
else:
lowercase : Optional[Any] = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
lowercase : Union[str, Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : Tuple ) -> Optional[Any]:
lowercase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : int = self.get_dummy_components()
lowercase : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : Union[str, Any] = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase )
lowercase : Union[str, Any] = sd_pipe(**lowerCAmelCase ).images
lowercase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : str = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> int:
lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : Union[str, Any] = self.get_dummy_components()
lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : Union[str, Any] = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase )
lowercase : Any = 'french fries'
lowercase : Tuple = sd_pipe(**lowerCAmelCase, negative_prompt=lowerCAmelCase )
lowercase : Optional[Any] = output.images
lowercase : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase : Dict = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Optional[int] ) -> str:
lowercase : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : str = self.get_dummy_components()
lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : str = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Any = self.get_dummy_inputs(lowerCAmelCase )
lowercase : int = [inputs['prompt']] * 2
lowercase : Dict = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
lowercase : Optional[int] = torch.from_numpy(lowerCAmelCase ).unsqueeze(0 ).to(lowerCAmelCase )
lowercase : List[Any] = image / 2 + 0.5
lowercase : List[str] = image.permute(0, 3, 1, 2 )
lowercase : List[str] = image.repeat(2, 1, 1, 1 )
lowercase : Dict = sd_pipe(**lowerCAmelCase ).images
lowercase : Union[str, Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
lowercase : Optional[int] = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : Union[str, Any] ) -> Any:
lowercase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[int] = self.get_dummy_components()
lowercase : str = EulerAncestralDiscreteScheduler(
beta_start=0.0_0085, beta_end=0.012, beta_schedule='scaled_linear' )
lowercase : Tuple = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : str = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Dict = self.get_dummy_inputs(lowerCAmelCase )
lowercase : Dict = sd_pipe(**lowerCAmelCase ).images
lowercase : Tuple = image[0, -3:, -3:, -1]
lowercase : Optional[int] = [round(lowerCAmelCase, 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(lowerCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
lowercase : List[str] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase ( self : List[str] ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def lowercase ( self : int ) -> Optional[Any]:
lowercase : List[Any] = self.get_dummy_components()
lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase )
lowercase : List[str] = VaeImageProcessor(do_resize=lowerCAmelCase, do_normalize=lowerCAmelCase )
lowercase : int = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
lowercase : Dict = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='pt' ) )[0]
lowercase : Optional[Any] = components['vae']
lowercase : str = self.get_dummy_inputs_by_type(lowerCAmelCase, input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowercase : Optional[Any] = vae.encode(inputs[image_param] ).latent_dist.mode()
lowercase : Optional[int] = pipe(**lowerCAmelCase )[0]
lowercase : int = np.abs(out - out_latents_inputs ).max()
self.assertLess(lowerCAmelCase, 1e-4, 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class a__ ( unittest.TestCase ):
def lowercase ( self : List[str] ) -> Dict:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase ( self : List[str], lowerCAmelCase : int=0 ) -> str:
lowercase : Dict = torch.manual_seed(lowerCAmelCase )
lowercase : Optional[Any] = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
lowercase : int = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def lowercase ( self : int ) -> str:
lowercase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : Optional[Any] = self.get_inputs()
lowercase : List[Any] = pipe(**lowerCAmelCase ).images
lowercase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : List[str] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase )
lowercase : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : List[Any] = self.get_inputs()
lowercase : Tuple = pipe(**lowerCAmelCase ).images
lowercase : Dict = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : str = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Optional[Any] ) -> List[Any]:
lowercase : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase )
lowercase : List[Any] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : Any = self.get_inputs()
lowercase : Union[str, Any] = pipe(**lowerCAmelCase ).images
lowercase : List[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowercase : int = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def lowercase ( self : Tuple ) -> Dict:
lowercase : Dict = 0
def callback_fn(lowerCAmelCase : int, lowerCAmelCase : int, lowerCAmelCase : torch.FloatTensor ) -> None:
lowercase : Optional[Any] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Union[str, Any] = latents[0, -3:, -3:, -1]
lowercase : List[str] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase : Optional[Any] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
lowercase : Union[str, Any] = latents[0, -3:, -3:, -1]
lowercase : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase : Union[str, Any] = False
lowercase : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowercase : Optional[int] = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : Union[str, Any] = self.get_inputs()
pipe(**lowerCAmelCase, callback=lowerCAmelCase, callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix', safety_checker=lowerCAmelCase, torch_dtype=torch.floataa )
lowercase : Dict = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase : Dict = self.get_inputs()
lowercase : List[Any] = pipe(**lowerCAmelCase )
lowercase : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def lowercase ( self : Union[str, Any] ) -> Tuple:
lowercase : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase : Optional[Any] = inputs['image'].resize((504, 504) )
lowercase : Union[str, Any] = 'timbrooks/instruct-pix2pix'
lowercase : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained(
lowerCAmelCase, safety_checker=lowerCAmelCase, )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
lowercase : str = pipe(**lowerCAmelCase )
lowercase : int = output.images[0]
lowercase : int = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
lowercase : Union[str, Any] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
| 53 | 1 |
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    # Count all simple paths from (row, col) to the bottom-right cell, treating
    # cells containing 1 as blocked.
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 259 |
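# Usage sketch: count the simple paths from the top-left to the bottom-right
# cell (1s are blocked). An open 2x2 grid has exactly two such paths.
assert depth_first_search([[0, 0], [0, 0]], row=0, col=0, visit=set()) == 2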
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    if index == len(sequence):
        print(current_subsequence)
        return

    # Branch 1: exclude the current element
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: include the current element
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 259 | 1 |
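# Expected-output note: the "exclude first" branch is always explored first,
# so [] prints first and the full sequence prints last; for a length-n input
# all 2**n subsequences are printed (16 for [3, 1, 2, 4]).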
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : str , snake_case : Dict , snake_case : Union[str, Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : Union[str, Any]=True , snake_case : int=True , snake_case : List[str]=99 , snake_case : str=32 , snake_case : Dict=5 , snake_case : List[str]=4 , snake_case : Dict=37 , snake_case : Dict="gelu" , snake_case : Any=0.1 , snake_case : Union[str, Any]=0.1 , snake_case : List[str]=512 , snake_case : Optional[int]=16 , snake_case : Dict=2 , snake_case : int=0.02 , snake_case : Any=3 , snake_case : Tuple=4 , snake_case : int=None , ):
'''simple docstring'''
A__ : Tuple = parent
A__ : List[Any] = batch_size
A__ : Optional[int] = seq_length
A__ : Optional[Any] = is_training
A__ : List[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : List[Any] = vocab_size
A__ : Dict = hidden_size
A__ : int = num_hidden_layers
A__ : Optional[int] = num_attention_heads
A__ : int = intermediate_size
A__ : str = hidden_act
A__ : Dict = hidden_dropout_prob
A__ : Optional[int] = attention_probs_dropout_prob
A__ : Tuple = max_position_embeddings
A__ : int = type_vocab_size
A__ : Any = type_sequence_label_size
A__ : Tuple = initializer_range
A__ : Optional[int] = num_labels
A__ : Any = num_choices
A__ : Optional[Any] = scope
A__ : List[str] = self.vocab_size - 1
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Dict = None
if self.use_token_type_ids:
A__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : Dict = None
A__ : List[str] = None
A__ : str = None
if self.use_labels:
A__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : int = ids_tensor([self.batch_size] , self.num_choices )
A__ : int = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
A__ : Any = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : List[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Optional[Any] = OpenAIGPTModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Optional[Any] = model(snake_case , token_type_ids=snake_case , head_mask=snake_case )
A__ : Tuple = model(snake_case , token_type_ids=snake_case )
A__ : List[str] = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : int , snake_case : str , snake_case : List[Any] , snake_case : Tuple , *snake_case : Any ):
'''simple docstring'''
A__ : str = OpenAIGPTLMHeadModel(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : Dict , snake_case : Dict , snake_case : Any , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : List[str] = OpenAIGPTDoubleHeadsModel(snake_case )
model.to(snake_case )
model.eval()
A__ : Optional[Any] = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : Optional[Any] , snake_case : List[Any] , snake_case : List[str] , snake_case : Optional[int] , *snake_case : Dict ):
'''simple docstring'''
A__ : List[str] = self.num_labels
A__ : str = OpenAIGPTForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : List[Any] = model(snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
snake_case_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
snake_case_ = (
{
'feature-extraction': OpenAIGPTModel,
'text-classification': OpenAIGPTForSequenceClassification,
'text-generation': OpenAIGPTLMHeadModel,
'zero-shot': OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _UpperCamelCase ( self : int , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Optional[int] ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _UpperCamelCase ( self : Tuple , snake_case : List[Any] , snake_case : List[Any] , snake_case : Dict=False ):
'''simple docstring'''
A__ : str = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A__ : str = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=snake_case , )
A__ : Optional[Any] = inputs_dict["""labels"""]
A__ : Optional[Any] = inputs_dict["""labels"""]
A__ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=snake_case , )
A__ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Tuple = OpenAIGPTModelTester(self )
A__ : Optional[int] = ConfigTester(self , config_class=snake_case , n_embd=37 )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*snake_case )
@slow
def _UpperCamelCase ( self : int ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : List[Any] = OpenAIGPTModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : List[str] = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(snake_case )
A__ : List[Any] = torch.tensor([[481, 4735, 544]] , dtype=torch.long , device=snake_case ) # the president is
A__ : int = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
4_0477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A__ : List[str] = model.generate(snake_case , do_sample=snake_case )
self.assertListEqual(output_ids[0].tolist() , snake_case )
| 359 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , snake_case : str , snake_case : List[Any]=13 , snake_case : Union[str, Any]=7 , snake_case : Optional[Any]=True , snake_case : str=True , snake_case : Dict=False , snake_case : Union[str, Any]=True , snake_case : Optional[Any]=99 , snake_case : str=32 , snake_case : Tuple=5 , snake_case : List[str]=4 , snake_case : Optional[int]=37 , snake_case : str="gelu" , snake_case : Tuple=0.1 , snake_case : Optional[int]=0.1 , snake_case : int=512 , snake_case : List[str]=16 , snake_case : str=2 , snake_case : Optional[int]=0.02 , snake_case : str=3 , snake_case : Dict=4 , snake_case : Optional[Any]=None , ):
'''simple docstring'''
A__ : int = parent
A__ : Union[str, Any] = batch_size
A__ : Optional[int] = seq_length
A__ : List[Any] = is_training
A__ : List[str] = use_input_mask
A__ : Optional[Any] = use_token_type_ids
A__ : List[Any] = use_labels
A__ : Union[str, Any] = vocab_size
A__ : List[Any] = hidden_size
A__ : Any = num_hidden_layers
A__ : Any = num_attention_heads
A__ : Optional[int] = intermediate_size
A__ : Any = hidden_act
A__ : Tuple = hidden_dropout_prob
A__ : Dict = attention_probs_dropout_prob
A__ : Optional[int] = max_position_embeddings
A__ : Tuple = type_vocab_size
A__ : Union[str, Any] = type_sequence_label_size
A__ : List[str] = initializer_range
A__ : Any = num_labels
A__ : Any = num_choices
A__ : int = scope
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A__ : Tuple = None
if self.use_input_mask:
A__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
A__ : Union[str, Any] = None
if self.use_token_type_ids:
A__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A__ : int = None
A__ : int = None
A__ : List[str] = None
if self.use_labels:
A__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
A__ : Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Any , snake_case : Dict , snake_case : Any , snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[Any] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
A__ : List[Any] = model(snake_case , attention_mask=snake_case )
A__ : Dict = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self : Optional[int] , snake_case : Dict , snake_case : Optional[int] , snake_case : List[str] , snake_case : str , snake_case : Optional[Any] , snake_case : List[str] , snake_case : List[Any] , snake_case : Tuple , snake_case : Optional[Any] , ):
'''simple docstring'''
A__ : List[str] = BioGptForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Any , snake_case : str , snake_case : Tuple , snake_case : int , snake_case : Optional[Any] , snake_case : Any , *snake_case : Dict ):
'''simple docstring'''
A__ : Union[str, Any] = BioGptModel(config=snake_case )
model.to(snake_case )
model.eval()
# create attention mask
A__ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
A__ : Any = self.seq_length // 2
A__ : str = 0
# first forward pass
A__ , A__ : List[Any] = model(snake_case , attention_mask=snake_case ).to_tuple()
# create hypothetical next token and extend to next_input_ids
A__ : int = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
A__ : List[str] = ids_tensor((1,) , snake_case ).item() + 1
A__ : Optional[int] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
A__ : int = random_other_next_tokens
# append to next input_ids and attn_mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : List[Any] = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case )] , dim=1 , )
# get two different outputs
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Optional[int] = model(snake_case , past_key_values=snake_case , attention_mask=snake_case )["""last_hidden_state"""]
# select random slice
A__ : List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
A__ : Any = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : Optional[Any] , *snake_case : str ):
'''simple docstring'''
A__ : Dict = BioGptModel(config=snake_case ).to(snake_case ).eval()
A__ : Tuple = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case )
# first forward pass
A__ : Dict = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
A__ , A__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
A__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
A__ : int = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
A__ : str = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ : Optional[int] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
A__ : Any = model(snake_case , attention_mask=snake_case )["""last_hidden_state"""]
A__ : Union[str, Any] = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[
"""last_hidden_state"""
]
# select random slice
A__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
A__ : List[Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : Optional[Any] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Tuple , *snake_case : Union[str, Any] , snake_case : Union[str, Any]=False ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM(snake_case )
model.to(snake_case )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
A__ : Optional[Any] = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def _UpperCamelCase ( self : int , snake_case : Optional[Any] , *snake_case : Optional[int] ):
'''simple docstring'''
A__ : int = BioGptModel(snake_case )
A__ : Union[str, Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def _UpperCamelCase ( self : Any , snake_case : Dict , snake_case : Tuple , snake_case : int , snake_case : Union[str, Any] , snake_case : Dict , *snake_case : Any ):
'''simple docstring'''
A__ : Union[str, Any] = self.num_labels
A__ : int = BioGptForTokenClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : List[str] = BioGptModelTester(self )
A__ : List[Any] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Any ):
'''simple docstring'''
A__ : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A__ : str = type
self.model_tester.create_and_check_model(*snake_case )
def _UpperCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
A__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case )
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
A__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*snake_case , gradient_checkpointing=snake_case )
def _UpperCamelCase ( self : Optional[int] ):
'''simple docstring'''
A__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case )
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case )
@slow
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : Tuple = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
A__ : Optional[int] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = """left"""
# Define PAD Token = EOS Token
A__ : Optional[int] = tokenizer.eos_token
A__ : Dict = model.config.eos_token_id
# use different length sentences to test batching
A__ : Union[str, Any] = [
"""Hello, my dog is a little""",
"""Today, I""",
]
A__ : List[str] = tokenizer(snake_case , return_tensors="""pt""" , padding=snake_case )
A__ : str = inputs["""input_ids"""].to(snake_case )
A__ : Dict = model.generate(
input_ids=snake_case , attention_mask=inputs["""attention_mask"""].to(snake_case ) , )
A__ : Optional[int] = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Any = model.generate(input_ids=snake_case )
A__ : List[str] = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
A__ : str = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(snake_case )
A__ : Dict = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
A__ : Optional[Any] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
A__ : List[Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
A__ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
A__ : Optional[int] = [
"""Hello, my dog is a little bit bigger than a little bit.""",
"""Today, I have a good idea of how to use the information""",
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
@slow
def _UpperCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Optional[Any] = BioGptModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ , A__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[int] = 3
A__ : List[Any] = input_dict["""input_ids"""]
A__ : Dict = input_ids.ne(1 ).to(snake_case )
A__ : Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
A__ : Union[str, Any] = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : int = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ , A__ : str = self.model_tester.prepare_config_and_inputs_for_common()
A__ : Any = 3
A__ : List[Any] = """multi_label_classification"""
A__ : Dict = input_dict["""input_ids"""]
A__ : Tuple = input_ids.ne(1 ).to(snake_case )
A__ : Any = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
A__ : Tuple = BioGptForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
A__ : List[str] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self : List[str] ):
'''simple docstring'''
A__ : Optional[Any] = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
A__ : str = torch.tensor([[2, 4805, 9, 656, 21]] )
A__ : Dict = model(snake_case )[0]
A__ : Tuple = 4_2384
A__ : str = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , snake_case )
A__ : str = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : Tuple = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
A__ : Any = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
model.to(snake_case )
torch.manual_seed(0 )
A__ : Tuple = tokenizer("""COVID-19 is""" , return_tensors="""pt""" ).to(snake_case )
A__ : Optional[int] = model.generate(
**snake_case , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=snake_case , )
A__ : Optional[int] = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case )
A__ : List[str] = (
"""COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"""
""" causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"""
""" territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"""
""" and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"""
""" more than 800,000 deaths."""
)
self.assertEqual(snake_case , snake_case )
| 296 | 0 |
from __future__ import annotations
def __lowercase ( lowerCamelCase : list ):
if len(lowerCamelCase ) == 0:
return []
UpperCamelCase_, UpperCamelCase_ : List[str] = min(lowerCamelCase ), max(lowerCamelCase )
UpperCamelCase_ : Tuple = int(max_value - min_value ) + 1
UpperCamelCase_ : list[list] = [[] for _ in range(lowerCamelCase )]
for i in my_list:
buckets[int(i - min_value )].append(lowerCamelCase )
return [v for bucket in buckets for v in sorted(lowerCamelCase )]
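# How the indexing above works: a value v lands in bucket int(v - min_value),
# so int(max_value - min_value) + 1 buckets are enough for every element; each
# bucket is then sorted with the built-in Timsort. For [4, 5, 3, 2, 1] this
# yields 5 buckets holding one element apiece.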
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 175 | import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
def get_config ( model_name : str ):
    repo_id = 'huggingface/label-files'
    filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = 'std_conv' if 'bit' in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer , num_labels=1000 , id2label=id2label , label2id=label2id , )
    return config
def rename_key ( name : str ):
    if "stem.conv" in name:
        name = name.replace('stem.conv' , 'bit.embedder.convolution' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "head.fc" in name:
        name = name.replace('head.fc' , 'classifier.1' )
    if name.startswith('norm' ):
        name = 'bit.' + name
    if "bit" not in name and "classifier" not in name:
        name = 'bit.encoder.' + name
    return name
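# Example renames produced by the rules above (hypothetical keys):
# 'stem.conv.weight' -> 'bit.embedder.convolution.weight'
# 'head.fc.weight' -> 'classifier.1.weight'
# 'blocks.0.conv1.weight' -> 'bit.encoder.layers.0.conv1.weight'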
def prepare_img ( ):
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __lowercase ( lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : List[str]=False ):
UpperCamelCase_ : Optional[Any] = get_config(lowerCamelCase )
# load original model from timm
UpperCamelCase_ : Dict = create_model(lowerCamelCase , pretrained=lowerCamelCase )
timm_model.eval()
# load state_dict of original model
UpperCamelCase_ : str = timm_model.state_dict()
for key in state_dict.copy().keys():
UpperCamelCase_ : Tuple = state_dict.pop(lowerCamelCase )
UpperCamelCase_ : str = val.squeeze() if 'head' in key else val
# load HuggingFace model
UpperCamelCase_ : Dict = BitForImageClassification(lowerCamelCase )
model.eval()
model.load_state_dict(lowerCamelCase )
# create image processor
UpperCamelCase_ : int = create_transform(**resolve_data_config({} , model=lowerCamelCase ) )
UpperCamelCase_ : List[Any] = transform.transforms
UpperCamelCase_ : str = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
UpperCamelCase_ : Tuple = BitImageProcessor(
do_resize=lowerCamelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCamelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=lowerCamelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
UpperCamelCase_ : Dict = prepare_img()
UpperCamelCase_ : Any = transform(lowerCamelCase ).unsqueeze(0 )
UpperCamelCase_ : Union[str, Any] = processor(lowerCamelCase , return_tensors='pt' ).pixel_values
# verify pixel values
assert torch.allclose(lowerCamelCase , lowerCamelCase )
# verify logits
with torch.no_grad():
UpperCamelCase_ : Dict = model(lowerCamelCase )
UpperCamelCase_ : Tuple = outputs.logits
print('Logits:' , logits[0, :3] )
print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] )
UpperCamelCase_ : List[Any] = timm_model(lowerCamelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase , outputs.logits , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
Path(lowerCamelCase ).mkdir(exist_ok=lowerCamelCase )
print(F"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
processor.save_pretrained(lowerCamelCase )
if push_to_hub:
print(F"Pushing model {model_name} and processor to the hub" )
model.push_to_hub(F"ybelkada/{model_name}" )
processor.push_to_hub(F"ybelkada/{model_name}" )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='resnetv2_50x1_bitm',
type=str,
help='Name of the BiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model to the hub.',
)
a_ = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 175 | 1 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _a :
SCREAMING_SNAKE_CASE_ : Optional[Union[str, Path]] = None
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : Optional[Dict] = None
SCREAMING_SNAKE_CASE_ : Optional[str] = None
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : bool = True
SCREAMING_SNAKE_CASE_ : Optional[int] = None
SCREAMING_SNAKE_CASE_ : int = 1
SCREAMING_SNAKE_CASE_ : Optional[Union[str, bool]] = None
SCREAMING_SNAKE_CASE_ : bool = False
SCREAMING_SNAKE_CASE_ : Optional[Dict] = None
SCREAMING_SNAKE_CASE_ : Optional[str] = None
def _lowercase ( self ) -> "DownloadConfig":
return self.__class__(**{k: copy.deepcopy(_SCREAMING_SNAKE_CASE ) for k, v in self.__dict__.items()} )
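# Why the deep copy above matters (illustrative): mutable fields such as a
# proxies or storage-options dict shared by reference would let one config
# silently mutate another; copy.deepcopy gives each copy its own instances.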
| 142 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
UpperCamelCase_ : int = TypeVar('''T''')
UpperCamelCase_ : Dict = TypeVar('''U''')
class _a ( Generic[T, U] ):
def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
_snake_case = key
_snake_case = val
_snake_case = None
_snake_case = None
def __repr__( self ) -> str:
return (
f"""Node: key: {self.key}, val: {self.val}, """
f"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
)
class _a ( Generic[T, U] ):
def __init__( self ) -> None:
_snake_case = DoubleLinkedListNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case = DoubleLinkedListNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
_snake_case , _snake_case = self.rear, self.head
def __repr__( self ) -> str:
_snake_case = ["DoubleLinkedList"]
_snake_case = self.head
while node.next is not None:
rep.append(str(_SCREAMING_SNAKE_CASE ) )
_snake_case = node.next
rep.append(str(self.rear ) )
return ",\n ".join(_SCREAMING_SNAKE_CASE )
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> None:
_snake_case = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
_snake_case = node
_snake_case = previous
_snake_case = node
_snake_case = self.rear
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> DoubleLinkedListNode[T, U] | None:
if node.prev is None or node.next is None:
return None
_snake_case = node.next
_snake_case = node.prev
_snake_case = None
_snake_case = None
return node
class _a ( Generic[T, U] ):
SCREAMING_SNAKE_CASE_ : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self ,_SCREAMING_SNAKE_CASE ) -> str:
_snake_case = DoubleLinkedList()
_snake_case = capacity
_snake_case = 0
_snake_case = 0
_snake_case = 0
_snake_case = {}
def __repr__( self ) -> str:
return (
f"""CacheInfo(hits={self.hits}, misses={self.miss}, """
f"""capacity={self.capacity}, current size={self.num_keys})"""
)
def __contains__( self ,_SCREAMING_SNAKE_CASE ) -> bool:
return key in self.cache
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ) -> U | None:
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
_snake_case = self.cache[key]
_snake_case = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(_SCREAMING_SNAKE_CASE )
return node.val
self.miss += 1
return None
def _lowercase ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> None:
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
_snake_case = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(_SCREAMING_SNAKE_CASE ) is not None
) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
_snake_case = DoubleLinkedListNode(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
_snake_case = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
_snake_case = value
self.list.add(_SCREAMING_SNAKE_CASE )
@classmethod
def _lowercase ( cls ,_SCREAMING_SNAKE_CASE = 128 ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
def cache_decorator_inner(_SCREAMING_SNAKE_CASE ) -> Callable[..., U]:
def cache_decorator_wrapper(*_SCREAMING_SNAKE_CASE ) -> U:
if func not in cls.decorator_function_to_instance_map:
_snake_case = LRUCache(_SCREAMING_SNAKE_CASE )
_snake_case = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
_snake_case = func(*_SCREAMING_SNAKE_CASE )
cls.decorator_function_to_instance_map[func].put(args[0] ,_SCREAMING_SNAKE_CASE )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(_SCREAMING_SNAKE_CASE ,"cache_info" ,_SCREAMING_SNAKE_CASE ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
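# Illustrative use of the decorator classmethod above (decorator name assumed;
# the cache keys on the first positional argument only):
#
#     @LRUCache.decorator(100)
#     def fib(n: int) -> int:
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)
#     print(fib.cache_info())
#     # -> CacheInfo(hits=28, misses=31, capacity=100, current size=31)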
if __name__ == "__main__":
import doctest
doctest.testmod()
| 142 | 1 |
"""simple docstring"""
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def lowercase (SCREAMING_SNAKE_CASE_ : int = 8 ) -> str:
SCREAMING_SNAKE_CASE = ascii_letters + digits + punctuation
return "".join(secrets.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) )
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> str:
# Alternative password generator: mixes the required characters (chars_incl)
# with randomly drawn letters, digits and punctuation, then shuffles the result
i -= len(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = i // 3
SCREAMING_SNAKE_CASE = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
SCREAMING_SNAKE_CASE = (
chars_incl
+ random(SCREAMING_SNAKE_CASE_ , quotient + remainder )
+ random(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
+ random(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
)
SCREAMING_SNAKE_CASE = list(SCREAMING_SNAKE_CASE_ )
shuffle(SCREAMING_SNAKE_CASE_ )
return "".join(SCREAMING_SNAKE_CASE_ )
# random is a generalised function for letters, characters and numbers
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int ) -> str:
return "".join(secrets.choice(SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ ) )
def lowercase (SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] ) -> Optional[int]:
pass # Put your code here...
def lowercase (SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
pass # Put your code here...
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
pass # Put your code here...
def lowercase (SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int = 8 ) -> bool:
if len(SCREAMING_SNAKE_CASE_ ) < min_length:
# Your Password must be at least 8 characters long
return False
SCREAMING_SNAKE_CASE = any(char in ascii_uppercase for char in password )
SCREAMING_SNAKE_CASE = any(char in ascii_lowercase for char in password )
SCREAMING_SNAKE_CASE = any(char in digits for char in password )
SCREAMING_SNAKE_CASE = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase, numbers, and special characters
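# Illustrative checks for the strength test above: "Aa1!aaaa" passes (length 8
# with uppercase, lowercase, a digit and punctuation), while "weakpass" fails
# (no uppercase, digit or punctuation).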
def lowercase () -> Optional[int]:
SCREAMING_SNAKE_CASE = int(input('Please indicate the max length of your password: ' ).strip() )
SCREAMING_SNAKE_CASE = input(
'Please indicate the characters that must be in your password: ' ).strip()
print('Password generated:' , password_generator(SCREAMING_SNAKE_CASE_ ) )
print(
'Alternative Password generated:' , alternative_password_generator(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , )
print('[If you are thinking of using this passsword, You better save it.]' )
if __name__ == "__main__":
main()
| 113 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ = None ) -> None:
if components is None:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = list(lowerCAmelCase__ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(lowerCAmelCase__ , self.__components ) ) + ")"
def __add__( self , lowerCAmelCase__ ) -> Vector:
SCREAMING_SNAKE_CASE = len(self )
if size == len(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = [self.__components[i] + other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else:
raise Exception('must have the same size' )
def __sub__( self , lowerCAmelCase__ ) -> Vector:
SCREAMING_SNAKE_CASE = len(self )
if size == len(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = [self.__components[i] - other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return Vector(lowerCAmelCase__ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> float:
...
def __mul__( self , lowerCAmelCase__ ) -> float | Vector:
if isinstance(lowerCAmelCase__ , (float, int) ):
SCREAMING_SNAKE_CASE = [c * other for c in self.__components]
return Vector(lowerCAmelCase__ )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(self ) == len(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = len(self )
SCREAMING_SNAKE_CASE = [self.__components[i] * other.component(lowerCAmelCase__ ) for i in range(lowerCAmelCase__ )]
return sum(lowerCAmelCase__ )
else: # error case
raise Exception('invalid operand!' )
def __A ( self ) -> Vector:
return Vector(self.__components )
def __A ( self , lowerCAmelCase__ ) -> float:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
SCREAMING_SNAKE_CASE = value
def __A ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
SCREAMING_SNAKE_CASE = [c**2 for c in self.__components]
return math.sqrt(sum(lowerCAmelCase__ ) )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = False ) -> float:
SCREAMING_SNAKE_CASE = self * other
SCREAMING_SNAKE_CASE = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
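# Intended behaviour of the operators above (illustrative):
# str(Vector([1, 2, 3]) + Vector([4, 5, 6])) -> "(5,7,9)"
# Vector([1, 0]) * Vector([0, 1]) -> 0 (dot product of orthogonal vectors)
# Vector([3, 4]).euclidean_length() -> 5.0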
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> Vector:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return Vector([0] * dimension )
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Vector:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) and (isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ))
SCREAMING_SNAKE_CASE = [0] * dimension
SCREAMING_SNAKE_CASE = 1
return Vector(SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : float , SCREAMING_SNAKE_CASE_ : Vector , SCREAMING_SNAKE_CASE_ : Vector ) -> Vector:
assert (
isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
and (isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ))
)
return x * scalar + y
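# e.g. axpy(2, Vector([1, 2]), Vector([3, 4])) evaluates x * 2 + y -> "(5,8)"
# (the BLAS-style a*x + y update; `axpy` is the conventional name for this helper)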
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Vector:
random.seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = [random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )]
return Vector(SCREAMING_SNAKE_CASE_ )
class lowerCAmelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
SCREAMING_SNAKE_CASE = matrix
SCREAMING_SNAKE_CASE = w
SCREAMING_SNAKE_CASE = h
def __str__( self ) -> str:
SCREAMING_SNAKE_CASE = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] + other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , lowerCAmelCase__ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
SCREAMING_SNAKE_CASE = []
for i in range(self.__height ):
SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] - other.component(lowerCAmelCase__ , lowerCAmelCase__ )
for j in range(self.__width )
]
matrix.append(lowerCAmelCase__ )
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , lowerCAmelCase__ ) -> Matrix:
...
@overload
def __mul__( self , lowerCAmelCase__ ) -> Vector:
...
def __mul__( self , lowerCAmelCase__ ) -> Vector | Matrix:
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): # matrix-vector
if len(lowerCAmelCase__ ) == self.__width:
SCREAMING_SNAKE_CASE = zero_vector(self.__height )
for i in range(self.__height ):
SCREAMING_SNAKE_CASE = [
self.__matrix[i][j] * other.component(lowerCAmelCase__ )
for j in range(self.__width )
]
ans.change_component(lowerCAmelCase__ , sum(lowerCAmelCase__ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(lowerCAmelCase__ , (int, float) ): # matrix-scalar
SCREAMING_SNAKE_CASE = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(lowerCAmelCase__ , self.__width , self.__height )
return None
def __A ( self ) -> int:
return self.__height
def __A ( self ) -> int:
return self.__width
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception('component: indices out of bounds' )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
SCREAMING_SNAKE_CASE = value
else:
raise Exception('change_component: indices out of bounds' )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
SCREAMING_SNAKE_CASE = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(lowerCAmelCase__ ) ):
SCREAMING_SNAKE_CASE = minor[i][:y] + minor[i][y + 1 :]
return Matrix(lowerCAmelCase__ , self.__width - 1 , self.__height - 1 ).determinant()
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(lowerCAmelCase__ , lowerCAmelCase__ )
else:
raise Exception('Indices out of bounds' )
def __A ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
SCREAMING_SNAKE_CASE = [
self.__matrix[0][y] * self.cofactor(0 , lowerCAmelCase__ ) for y in range(self.__width )
]
return sum(lowerCAmelCase__ )
def lowercase (SCREAMING_SNAKE_CASE_ : int ) -> Matrix:
SCREAMING_SNAKE_CASE = [[0] * n for _ in range(SCREAMING_SNAKE_CASE_ )]
return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase (SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ) -> Matrix:
random.seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE = [
[random.randint(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )] for _ in range(SCREAMING_SNAKE_CASE_ )
]
return Matrix(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
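# Illustrative Matrix usage (intended behaviour of the class and helpers above):
# Matrix([[1, 2], [3, 4]], 2, 2).determinant() -> 1 * 4 - 2 * 3 = -2
# square_zero_matrix(3) (name assumed for the zero-matrix helper) -> 3x3 zeros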
| 113 | 1 |
def lowerCamelCase__ ( a , a , a ) -> float:
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(a , a ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
_A: str = rate_per_annum / 12
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
_A: Tuple = years_to_repay * 12
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
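# Worked example (illustrative): principal=25000, rate_per_annum=0.12,
# years_to_repay=3 -> monthly rate 0.01 over 36 payments, so the EMI is
# 25000 * 0.01 * 1.01**36 / (1.01**36 - 1) ~= 830.36 per month.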
if __name__ == "__main__":
import doctest
doctest.testmod()
| 301 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
UpperCAmelCase__ : Tuple = {
'config': [
'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
'OnnxConfig',
'OnnxConfigWithPast',
'OnnxSeq2SeqConfigWithPast',
'PatchingSpec',
],
'convert': ['export', 'validate_model_outputs'],
'features': ['FeaturesManager'],
'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
UpperCAmelCase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
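# _LazyModule defers the imports declared in _import_structure: an attribute
# lookup such as `transformers.onnx.OnnxConfig` imports the `config` submodule
# the first time it is accessed, keeping the top-level import cheap.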
| 301 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
class __a (UpperCamelCase_):
'''simple docstring'''
_SCREAMING_SNAKE_CASE :Optional[Any] = """philschmid/bart-large-cnn-samsum"""
_SCREAMING_SNAKE_CASE :int = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
_SCREAMING_SNAKE_CASE :Optional[int] = """summarizer"""
_SCREAMING_SNAKE_CASE :Union[str, Any] = AutoTokenizer
_SCREAMING_SNAKE_CASE :int = AutoModelForSeqaSeqLM
_SCREAMING_SNAKE_CASE :str = ["""text"""]
_SCREAMING_SNAKE_CASE :Optional[int] = ["""text"""]
def _a ( self , _a ) -> Any:
"""simple docstring"""
return self.pre_processor(_a , return_tensors="""pt""" , truncation=_a )
def _a ( self , _a ) -> Union[str, Any]:
"""simple docstring"""
return self.model.generate(**_a )[0]
def _a ( self , _a ) -> List[Any]:
"""simple docstring"""
return self.pre_processor.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a )
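# Illustrative usage (class name assumed; PipelineTool instances are callable
# and run encode -> forward -> decode in order):
#
#     tool = TextSummarizationTool()
#     print(tool("Some long English text to condense ..."))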
| 132 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
a :Optional[Any] = logging.get_logger(__name__)
class __a (UpperCamelCase_):
'''simple docstring'''
def __init__( self , *_a , **_a ) -> None:
"""simple docstring"""
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a )
| 132 | 1 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__snake_case : int = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
__snake_case : int = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : Tuple=False ) -> Any:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = create_model(
'HTSAT-tiny' , 'roberta' , UpperCamelCase_ , precision='fp32' , device='cuda:0' if torch.cuda.is_available() else 'cpu' , enable_fusion=UpperCamelCase_ , fusion_type='aff_2d' if enable_fusion else None , )
return model, model_cfg
def _UpperCamelCase ( UpperCamelCase_ : int ) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ = {}
lowerCAmelCase__ = r'.*sequential.(\d+).*'
lowerCAmelCase__ = r'.*_projection.(\d+).*'
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowerCAmelCase__ = key.replace(UpperCamelCase_ , UpperCamelCase_ )
if re.match(UpperCamelCase_ , UpperCamelCase_ ):
# replace sequential layers with list
lowerCAmelCase__ = re.match(UpperCamelCase_ , UpperCamelCase_ ).group(1 )
lowerCAmelCase__ = key.replace(F"sequential.{sequential_layer}." , F"layers.{int(UpperCamelCase_ )//3}.linear." )
elif re.match(UpperCamelCase_ , UpperCamelCase_ ):
lowerCAmelCase__ = int(re.match(UpperCamelCase_ , UpperCamelCase_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
lowerCAmelCase__ = 1 if projecton_layer == 0 else 2
lowerCAmelCase__ = key.replace(F"_projection.{projecton_layer}." , F"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowerCAmelCase__ = value
lowerCAmelCase__ = mixed_qkv.size(0 ) // 3
lowerCAmelCase__ = mixed_qkv[:qkv_dim]
lowerCAmelCase__ = mixed_qkv[qkv_dim : qkv_dim * 2]
lowerCAmelCase__ = mixed_qkv[qkv_dim * 2 :]
lowerCAmelCase__ = query_layer
lowerCAmelCase__ = key_layer
lowerCAmelCase__ = value_layer
else:
lowerCAmelCase__ = value
return model_state_dict
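# Example key renames intended by the rules above (hypothetical keys):
# 'text_branch.mlp.fc1.weight' -> 'text_model.intermediate.dense.weight'
# 'text_projection.0.weight' -> 'text_projection.linear1.weight'
# 'audio_branch.sequential.3.weight' -> 'audio_model.audio_encoder.layers.1.linear.weight'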
def _UpperCamelCase ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Any=False ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = init_clap(UpperCamelCase_ , enable_fusion=UpperCamelCase_ )
clap_model.eval()
lowerCAmelCase__ = clap_model.state_dict()
lowerCAmelCase__ = rename_state_dict(UpperCamelCase_ )
lowerCAmelCase__ = ClapConfig()
lowerCAmelCase__ = enable_fusion
lowerCAmelCase__ = ClapModel(UpperCamelCase_ )
# ignore the spectrogram embedding layer
model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
transformers_config.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
__snake_case : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
__snake_case : int = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 354 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( __lowercase , unittest.TestCase):
_SCREAMING_SNAKE_CASE : List[str] = RoCBertTokenizer
_SCREAMING_SNAKE_CASE : str = None
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : List[Any] = True
_SCREAMING_SNAKE_CASE : Union[str, Any] = filter_non_english
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
lowerCAmelCase__ = {}
lowerCAmelCase__ = {}
for i, value in enumerate(_UpperCamelCase ):
lowerCAmelCase__ = i
lowerCAmelCase__ = i
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(_UpperCamelCase , _UpperCamelCase , ensure_ascii=_UpperCamelCase )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(_UpperCamelCase , _UpperCamelCase , ensure_ascii=_UpperCamelCase )
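# The shape and pronunciation tables built here map every vocab token to its
# own index, so the shape ids and pronunciation ids asserted in the test below
# coincide with the plain token ids.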
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
lowerCAmelCase__ = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(_UpperCamelCase , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_UpperCamelCase ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_UpperCamelCase ) , [5, 6, 2, 5, 7, 8] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = RoCBertBasicTokenizer(do_lower_case=_UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )
    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))
        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))
    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))
        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))
    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))
        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)
                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_a = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 122 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
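# Example: "backbone.patch_embed1.proj.weight" becomes
# "segformer.encoder.patch_embeddings.0.proj.weight" after the renames above.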
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
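# The original checkpoint fuses keys and values into a single `kv` matrix per block;
# the first config.hidden_sizes[i] rows are split out as `key` and the rest as `value`.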
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False
    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info(f"Converting model {model_name}...")
    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]
    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
    else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 79 |
"""simple docstring"""
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in ``base`` (2-36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 are written as letters, e.g. 10 -> "A"
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
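# Examples: decimal_to_any(255, 16) == "FF" and decimal_to_any(9, 2) == "1001".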
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 3_7):
for num in range(1_0_0_0):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 242 | 0 |
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    """Encode/decode ``sequence`` with the Atbash cipher one character at a time."""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            # uppercase letters map A<->Z, B<->Y, ...
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            # lowercase letters map a<->z, b<->y, ...
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    """Faster Atbash using a reversed-alphabet lookup table."""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
def benchmark() -> None:
    """Compare the runtime of both implementations."""
    from timeit import timeit
    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
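# Atbash is an involution: atbash(atbash(text)) == text for any input string.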
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 53 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Return the maximum sum over all (optionally empty) contiguous subarrays of ``arr``."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
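# Kadane's algorithm runs in O(n). For [-2, 1, -3, 4, -1, 2, 1, -5, 4] the best
# subarray is [4, -1, 2, 1], so max_subarray_sum returns 6.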
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'''{max_subarray_sum(nums) = }''')
| 53 | 1 |
'''simple docstring'''
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of data shards implied by the list-valued gen_kwargs."""
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)
def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Split ``num_shards`` shard indices into at most ``max_num_jobs`` contiguous ranges."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group
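# Example: _distribute_shards(num_shards=5, max_num_jobs=2) == [range(0, 3), range(3, 5)];
# the remainder shard goes to the earliest group.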
def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split gen_kwargs into at most ``max_num_jobs`` sub-kwargs, one per job."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]
def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    """Merge a list of gen_kwargs back into one, concatenating list values in order."""
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Shuffle the list values of gen_kwargs; lists of the same size share one permutation."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
| 79 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
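# `KwargsHandler.to_kwargs()` serializes only the fields that differ from the dataclass
# defaults, which is what the dictionary comparisons below exercise.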
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
@require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler
        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)
        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
@require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)
    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 79 | 1 |
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
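# The `text_path` fixture (from the shared pytest conftest) must point at a four-line
# text file for the row-count assertion above to hold.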
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _A ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
a__ : str =tmp_path / "cache"
a__ : int ={"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a__ : Any =TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _A ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
a__ : Dict =tmp_path / "cache"
a__ : int ={"text": "string"}
a__ : Optional[int] =TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE , split=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def _A ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
if issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
a__ : int =text_path
elif issubclass(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
a__ : List[str] =[text_path]
a__ : int =tmp_path / "cache"
a__ : Any ={"text": "string"}
a__ : Union[str, Any] =TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _A ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
a__ : Dict =tmp_path / "cache"
a__ : str ={"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
a__ : Dict =TextDatasetReader({"train": text_path} , cache_dir=SCREAMING_SNAKE_CASE , keep_in_memory=SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def _A ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
if split:
a__ : Any ={split: text_path}
else:
a__ : Tuple ="train"
a__ : List[Any] ={"train": text_path, "test": text_path}
a__ : int =tmp_path / "cache"
a__ : Dict ={"text": "string"}
a__ : List[Any] =TextDatasetReader(SCREAMING_SNAKE_CASE , cache_dir=SCREAMING_SNAKE_CASE ).read()
_check_text_datasetdict(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
| 148 |
import os
def solution():
    """Return the first ten digits of the sum of the numbers stored in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
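# num.txt holds the one hundred 50-digit numbers of Project Euler problem 13;
# solution() returns the first ten digits of their sum.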
if __name__ == "__main__":
print(solution())
| 148 | 1 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count n-digit positive integers that are also an nth power (Project Euler 63)."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
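# A pair counts when base**power has exactly `power` digits, e.g. 9**2 == 81 (2 digits);
# with the default bounds the count is 49, the Project Euler 63 answer.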
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""")
| 70 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
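# MAPPING translates fairseq parameter prefixes (keys) into HF UniSpeech module paths
# (values); names in TOP_LEVEL_KEYS live at the top of the model rather than under
# the `unispeech.` prefix.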
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return
            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq checkpoint's weights into the HF UniSpeech design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_unispeech, is_finetuned)
    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 201 | 0 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowerCAmelCase = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; each prompt is yielded n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that fires when every generation in the batch is complete."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True if all generated sequences contain one of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the generated code containing EOF_STRINGS."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions per task, distributing the work across devices."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            total_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs,
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 364 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    """Swap a 32-char bit string from big- to little-endian byte order."""
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Format a non-negative int as 8 hexadecimal characters in little-endian byte order."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Pad the message per MD5: append a 1 bit, zeros, then the 64-bit message length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string
def get_block_words(bit_string: bytes):
    """Split the padded bit string into 512-bit blocks of sixteen little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Bitwise NOT restricted to 32 bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Addition modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotate a 32-bit integer left by ``shift`` bits."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Return the 32-character hexadecimal MD5 digest of ``message``."""
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_32(aa, a)
        ba = sum_32(ba, b)
        ca = sum_32(ca, c)
        da = sum_32(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
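# Hedged sanity check: `_self_test` is an illustrative helper name (an assumption,
# not part of any standard API); MD5 of the empty message is the canonical test vector.
def _self_test() -> None:
    assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"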
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5 | 0 |
'''simple docstring'''
def catalan(number: int) -> int:
    """Return the ``number``-th Catalan number (1-indexed), computed iteratively."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
    import doctest

    doctest.testmod()
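    # Hedged usage sketch (not in the original file): with this recurrence,
    # catalan(n) yields the (n-1)-th Catalan number under the usual
    # zero-indexed convention, so the first few values are:
    print([catalan(n) for n in range(1, 6)])  # [1, 1, 2, 5, 14]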
| 181 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import TFCvtForImageClassification, TFCvtModel
    from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor
class TFCvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class TFCvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            # create a random int32 tensor of given shape
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size,
            num_labels=self.num_labels,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            num_heads=self.num_heads,
            patch_sizes=self.patch_sizes,
            patch_padding=self.patch_padding,
            patch_stride=self.patch_stride,
            stride_kv=self.stride_kv,
            depth=self.depth,
            cls_token=self.cls_token,
            attention_drop_rate=self.attention_drop_rate,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFCvtModel(config=config)
        result = model(pixel_values, training=False)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFCvtForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFCvtModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFCvtModelTester(self)
        self.config_tester = TFCvtConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    def test_dataset_conversion(self):
        super().test_dataset_conversion()

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8")
    def test_keras_fit_mixed_precision(self):
        policy = tf.keras.mixed_precision.Policy("mixed_float16")
        tf.keras.mixed_precision.set_global_policy(policy)
        super().test_keras_fit()
        tf.keras.mixed_precision.set_global_policy("float32")

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFCvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([0.9285, 0.9015, -0.3150])
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy(), expected_slice, atol=1e-4))
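    # Hedged sketch (not part of the original test): the same check run outside
    # the test harness, assuming network access to download the checkpoint:
    #   model = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    #   processor = AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    #   logits = model(**processor(images=prepare_img(), return_tensors="tf")).logits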
| 181 | 1 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "roberta-base": 512,
    "roberta-large": 512,
    "roberta-large-mnli": 512,
    "distilroberta-base": 512,
    "roberta-base-openai-detector": 512,
    "roberta-large-openai-detector": 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs):
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
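
# Hedged usage sketch (not from the original module; requires downloading the
# "roberta-base" files from the Hub):
#   tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
#   ids = tokenizer("Hello world")["input_ids"]
#   assert tokenizer.decode(ids, skip_special_tokens=True).strip() == "Hello world"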
| 263 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Load a saved state dict, cast every tensor to fp16, and save it back."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
    fire.Fire(convert)
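    # Hedged CLI sketch (file names are illustrative, not from the source):
    #   python this_script.py pytorch_model.bin --save_path pytorch_model_fp16.bin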
| 263 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
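
# Hedged usage sketch (the dataset name below is illustrative, not from the
# source):
#   python run_image_classification.py --dataset_name beans \
#       --output_dir ./beans_outputs --do_train --do_eval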
if __name__ == "__main__":
    main()
| 286 |
"""simple docstring"""
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps", type=str, default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", )
    parser.add_argument(
        "--output_dir", type=str, default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--resume_from_checkpoint", type=str, default=None,
        help="If the training should continue from a checkpoint folder.", )
    parser.add_argument(
        "--with_tracking", action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs",
        help="Location on where to store experiment tracking logs` and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
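
# Hedged launch sketch (not from the source): run under accelerate so the
# script picks up the distributed configuration:
#   accelerate launch cv_example.py --data_dir path/to/pets/images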
if __name__ == "__main__":
    main()
| 78 | 0 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the md5 hash of the whitespace-stripped content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Compute mean and max line length of the file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the file."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes; remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if the file is autogenerated by looking for keywords in its first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Flag configuration files and unit tests: first by keywords in the opening
    lines, then by how often 'config'/'test' occur relative to the line count."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if the file contains none of the keywords: def, class, for, while."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if the file uses the '=' symbol no more than ``minimum`` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function so the cache is filled only once."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):  # intentionally shadows the builtin, as in the original script
    """Filter the dataset with heuristics; config/test and keyword-free files
    are dropped with probability ``args.filter_proba``."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
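
# Hedged invocation sketch (script name and dataset are illustrative; the
# argument names are assumed to come from PreprocessingArguments):
#   python preprocessing.py --dataset_name transformersbook/codeparrot --output_dir codeparrot-clean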
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
| 286 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)
    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)
    def test_from_environment(self):
        # Framework not provided - TensorFlow not in environment -> use PyTorch
        mock_tf = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf = MagicMock(return_value=True)
        mock_torch = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf = MagicMock(return_value=False)
        mock_torch = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf), patch(
            "transformers.onnx.features.is_torch_available", mock_torch
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 286 | 1 |
def simplify(current_set: list[list]) -> list[list]:
    """Divide each row by its leading coefficient, subtract the first row from
    the others to eliminate the first column, then recurse on the remainder."""
    # Divide each row by the magnitude of its first term
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                duplicate_set[row_index][column_index] = column
                continue
            duplicate_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = duplicate_set[0]
    final_set = [first_row]
    current_set = duplicate_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        current_first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, current_first_row)
        final_set = resultant
    return final_set
def solve_simultaneous(equations: list[list]) -> list:
    """Solve a system of n linear equations given as n lists of length n + 1."""
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    eq = [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ]
    print(solve_simultaneous(eq))
    print(solve_simultaneous([[4, 2]]))
| 304 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
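# With sys.modules[__name__] replaced by a _LazyModule, importing e.g.
# MobileViTModel from this package only triggers the heavy torch/TF imports on
# first attribute access; the TYPE_CHECKING branch above keeps the same symbols
# visible to static type checkers.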
| 143 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 | """simple docstring"""
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 126 | 0 |
values = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: '9',
10: 'a',
11: 'b',
12: 'c',
13: 'd',
14: 'e',
15: 'f',
}
def decimal_to_hexadecimal(decimal):
    """Convert a whole-valued decimal number to its hexadecimal string form."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
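# Illustrative checks, computed by hand: decimal_to_hexadecimal(255) == "0xff",
# decimal_to_hexadecimal(16) == "0x10", decimal_to_hexadecimal(-256) == "-0x100".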
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15 | import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
UpperCamelCase_ : Optional[Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(snake_case )
def SCREAMING_SNAKE_CASE__ ( self : int ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = 'sshleifer/tiny-gpt2'
UpperCamelCase_ : Dict = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case , multi_process=snake_case , )
UpperCamelCase_ : Tuple = TensorFlowBenchmark(snake_case )
UpperCamelCase_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ : Dict = 'sgugger/tiny-distilbert-classification'
UpperCamelCase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , only_pretrain_model=snake_case , )
UpperCamelCase_ : int = TensorFlowBenchmark(snake_case )
UpperCamelCase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : str = 'sshleifer/tiny-gpt2'
UpperCamelCase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
UpperCamelCase_ : List[str] = TensorFlowBenchmark(snake_case )
UpperCamelCase_ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : List[str] = 'sshleifer/tiny-gpt2'
UpperCamelCase_ : str = AutoConfig.from_pretrained(snake_case )
UpperCamelCase_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=snake_case , multi_process=snake_case , )
UpperCamelCase_ : int = TensorFlowBenchmark(snake_case , [config] )
UpperCamelCase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
UpperCamelCase_ : List[str] = 'sshleifer/tiny-gpt2'
UpperCamelCase_ : int = AutoConfig.from_pretrained(snake_case )
UpperCamelCase_ : Any = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
UpperCamelCase_ : Tuple = TensorFlowBenchmark(snake_case , [config] )
UpperCamelCase_ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> int:
"""simple docstring"""
UpperCamelCase_ : Optional[int] = 'sshleifer/tiny-gpt2'
UpperCamelCase_ : Tuple = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
UpperCamelCase_ : Optional[int] = TensorFlowBenchmark(snake_case )
UpperCamelCase_ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
"""simple docstring"""
UpperCamelCase_ : Tuple = 'sshleifer/tiny-gpt2'
UpperCamelCase_ : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCamelCase_ : int = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
UpperCamelCase_ : List[Any] = TensorFlowBenchmark(snake_case , [config] )
UpperCamelCase_ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : Tuple = 'patrickvonplaten/t5-tiny-random'
UpperCamelCase_ : List[str] = AutoConfig.from_pretrained(snake_case )
UpperCamelCase_ : List[str] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=snake_case , )
UpperCamelCase_ : int = TensorFlowBenchmark(snake_case , configs=[config] )
UpperCamelCase_ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 , 'Cannot do xla on CPU.' )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : int = 'sshleifer/tiny-gpt2'
UpperCamelCase_ : Union[str, Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=snake_case , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , use_xla=snake_case , multi_process=snake_case , )
UpperCamelCase_ : Union[str, Any] = TensorFlowBenchmark(snake_case )
UpperCamelCase_ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ : List[Any] = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=snake_case , save_to_csv=snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(snake_case , 'inf_time.csv' ) , inference_memory_csv_file=os.path.join(snake_case , 'inf_mem.csv' ) , env_info_csv_file=os.path.join(snake_case , 'env.csv' ) , multi_process=snake_case , )
UpperCamelCase_ : List[str] = TensorFlowBenchmark(snake_case )
benchmark.run()
self.assertTrue(Path(os.path.join(snake_case , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(snake_case , 'env.csv' ) ).exists() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> str:
"""simple docstring"""
UpperCamelCase_ : Optional[Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(snake_case : Union[str, Any] ):
self.assertTrue(hasattr(snake_case , 'sequential' ) )
self.assertTrue(hasattr(snake_case , 'cumulative' ) )
self.assertTrue(hasattr(snake_case , 'current' ) )
self.assertTrue(hasattr(snake_case , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase_ : str = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(snake_case , 'log.txt' ) , log_print=snake_case , trace_memory_line_by_line=snake_case , eager_mode=snake_case , multi_process=snake_case , )
UpperCamelCase_ : Tuple = TensorFlowBenchmark(snake_case )
UpperCamelCase_ : Union[str, Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(snake_case , 'log.txt' ) ).exists() )
| 175 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''RUCAIBox/mvp''': 1024,
}
class MvpTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MvpTokenizer
def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : List[Any]="replace" , UpperCAmelCase : Dict="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : Dict="</s>" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Union[str, Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : List[Any]="<mask>" , UpperCAmelCase : Union[str, Any]=False , UpperCAmelCase : List[str]=True , **UpperCAmelCase : List[Any] , ):
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
__lowerCamelCase : str = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
__lowerCamelCase : List[str] = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
__lowerCamelCase : Dict = add_prefix_space
__lowerCamelCase : str = pre_tok_class(**UpperCAmelCase )
__lowerCamelCase : Union[str, Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCamelCase : int = "post_processor"
__lowerCamelCase : List[str] = getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
__lowerCamelCase : Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCamelCase : List[str] = tuple(state["sep"] )
if "cls" in state:
__lowerCamelCase : Dict = tuple(state["cls"] )
__lowerCamelCase : str = False
if state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
__lowerCamelCase : List[str] = add_prefix_space
__lowerCamelCase : Optional[Any] = True
if state.get("trim_offsets" , UpperCAmelCase ) != trim_offsets:
__lowerCamelCase : Dict = trim_offsets
__lowerCamelCase : Tuple = True
if changes_to_apply:
__lowerCamelCase : List[Any] = getattr(UpperCAmelCase , state.pop("type" ) )
__lowerCamelCase : Union[str, Any] = component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def lowerCamelCase__ ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__ ( self : Any , UpperCAmelCase : Tuple ):
__lowerCamelCase : Dict = AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
__lowerCamelCase : List[str] = value
def lowerCamelCase__ ( self : List[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ):
__lowerCamelCase : List[str] = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Any , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : str ):
__lowerCamelCase : Optional[Any] = kwargs.get("is_split_into_words" , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
__lowerCamelCase : Tuple = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[Any]=None ):
__lowerCamelCase : Dict = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
__lowerCamelCase : Union[str, Any] = [self.sep_token_id]
__lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 64 | """simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(
    pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False,
    proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir,
        force_download=force_download, resume_download=resume_download, proxies=proxies,
        use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
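# Illustrative call (hypothetical checkpoint name, requires network access):
# get_image_processor_config("google/vit-base-patch16-224") would return the
# parsed image processor configuration as a dict, or {} when the repo ships none.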
class AutoImageProcessor:
def __init__( self : Tuple ):
raise EnvironmentError(
"AutoImageProcessor is designed to be instantiated "
"using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method." )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
__lowerCamelCase : int = kwargs.pop("config" , UpperCAmelCase )
__lowerCamelCase : Dict = kwargs.pop("trust_remote_code" , UpperCAmelCase )
__lowerCamelCase : Any = True
__lowerCamelCase , __lowerCamelCase : str = ImageProcessingMixin.get_image_processor_dict(UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : Optional[int] = config_dict.get("image_processor_type" , UpperCAmelCase )
__lowerCamelCase : List[Any] = None
if "AutoImageProcessor" in config_dict.get("auto_map" , {} ):
__lowerCamelCase : List[str] = config_dict["auto_map"]["AutoImageProcessor"]
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__lowerCamelCase : Dict = config_dict.pop("feature_extractor_type" , UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
"Could not find image processor class in the image processor config or the model config. Loading"
" based on pattern matching with the model's feature extractor configuration." )
__lowerCamelCase : Tuple = feature_extractor_class.replace("FeatureExtractor" , "ImageProcessor" )
if "AutoFeatureExtractor" in config_dict.get("auto_map" , {} ):
__lowerCamelCase : Any = config_dict["auto_map"]["AutoFeatureExtractor"]
__lowerCamelCase : Optional[int] = feature_extractor_auto_map.replace("FeatureExtractor" , "ImageProcessor" )
logger.warning(
"Could not find image processor auto map in the image processor config or the model config."
" Loading based on pattern matching with the model's feature extractor configuration." )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : int = AutoConfig.from_pretrained(UpperCAmelCase , **UpperCAmelCase )
# It could be in `config.image_processor_type``
__lowerCamelCase : int = getattr(UpperCAmelCase , "image_processor_type" , UpperCAmelCase )
if hasattr(UpperCAmelCase , "auto_map" ) and "AutoImageProcessor" in config.auto_map:
__lowerCamelCase : Optional[int] = config.auto_map["AutoImageProcessor"]
if image_processor_class is not None:
__lowerCamelCase : Any = image_processor_class_from_name(UpperCAmelCase )
__lowerCamelCase : str = image_processor_auto_map is not None
__lowerCamelCase : Optional[Any] = image_processor_class is not None or type(UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
__lowerCamelCase : Dict = resolve_trust_remote_code(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if has_remote_code and trust_remote_code:
__lowerCamelCase : Optional[Any] = get_class_from_dynamic_module(
UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
__lowerCamelCase : List[Any] = kwargs.pop("code_revision" , UpperCAmelCase )
if os.path.isdir(UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
__lowerCamelCase : Tuple = IMAGE_PROCESSOR_MAPPING[type(UpperCAmelCase )]
return image_processor_class.from_dict(UpperCAmelCase , **UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class) | 64 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    """Build the small example tree used by the demos below."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    """Root, then left subtree, then right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    """Left subtree, then right subtree, then root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    """Left subtree, then root, then right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal using a queue."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of all nodes on the given level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of all nodes on the given level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """Level-by-level traversal that alternates direction on each level."""
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
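# For the sample tree (1 at the root, children 2 and 3, with 4 and 5 under 2),
# the traversals above evaluate, by hand, to: in-order [4, 2, 5, 1, 3],
# pre-order [1, 2, 4, 5, 3], post-order [4, 5, 2, 3, 1], height 3,
# level order [1, 2, 3, 4, 5], and zigzag [[1], [3, 2], [4, 5]].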
| 168 |
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR outputs 1 only when every input is 0, i.e. NOT(input_1 OR input_2)."""
    return int(input_1 == input_2 == 0)


def main() -> None:
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
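# Sanity check via De Morgan's law: NOR(a, b) == AND(NOT a, NOT b), so the
# table above reads 1, 0, 0, 0 for inputs (0,0), (0,1), (1,0), (1,1).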
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 168 | 1 |
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    """Fraction of positions where predictions and labels agree."""
    return (preds == labels).mean()
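# Illustrative check with numpy arrays: simple_accuracy(np.array([0, 1, 1]),
# np.array([0, 1, 0])) == 2/3, since two of the three positions agree.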
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 354 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
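# A minimal, self-contained sketch of the sentence-level GLEU described above
# (illustrative only -- the metric class below delegates to nltk's
# gleu_score.corpus_gleu): count all 1..4-gram occurrences in hypothesis and
# reference, take the overlap, and return min(precision, recall).
def _sentence_gleu_sketch(hypothesis, reference, min_len=1, max_len=4):
    from collections import Counter

    def ngrams(tokens):
        # Multiset of all n-grams of order min_len..max_len in the token list.
        return Counter(
            tuple(tokens[i : i + n])
            for n in range(min_len, max_len + 1)
            for i in range(len(tokens) - n + 1)
        )

    hyp_counts, ref_counts = ngrams(hypothesis), ngrams(reference)
    matches = sum((hyp_counts & ref_counts).values())  # clipped n-gram matches
    precision = matches / max(sum(hyp_counts.values()), 1)
    recall = matches / max(sum(ref_counts.values()), 1)
    return min(precision, recall)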
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
"""simple docstring"""
    def _info(self) -> MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
    def _compute(self, predictions, references, min_len: int = 1, max_len: int = 4) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
| 277 | 0 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit: x * Phi(x), with Phi the standard normal CDF."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf


def _gelu_new(x):
    """Tanh approximation of GELU, as used by the original GPT."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU with outputs clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split `axis` in two and gate one half by the sigmoid of the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)


if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new

ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
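# Quick illustrative check: get_tf_activation("gelu")(tf.constant([0.0])) is a
# zero tensor, since GELU(0) = 0 * Phi(0) = 0.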
| 40 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase_ ( A_, A_, unittest.TestCase ):
lowercase__ = IFPipeline
lowercase__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
lowercase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __magic_name__ ( self : Dict ) -> List[str]:
'''simple docstring'''
return self._get_dummy_components()
def __magic_name__ ( self : int , snake_case_ : Tuple , snake_case_ : Union[str, Any]=0 ) -> Optional[int]:
'''simple docstring'''
if str(snake_case_ ).startswith("mps" ):
A__ = torch.manual_seed(snake_case_ )
else:
A__ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __magic_name__ ( self : int ) -> str:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __magic_name__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __magic_name__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __magic_name__ ( self : List[str] ) -> Dict:
'''simple docstring'''
self._test_save_load_local()
def __magic_name__ ( self : List[str] ) -> int:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __magic_name__ ( self : Any ) -> Any:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : Optional[int] ) -> int:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : Any ) -> Optional[int]:
'''simple docstring'''
A__ = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
A__ = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case_ , tokenizer=snake_case_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
A__, A__ = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
A__ = None
A__ = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
A__ = IFImgaImgPipeline(**pipe_a.components )
A__ = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
A__ = IFInpaintingPipeline(**pipe_a.components )
A__ = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def __magic_name__ ( self : Any , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : str ) -> Union[str, Any]:
'''simple docstring'''
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (64, 64, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def __magic_name__ ( self : Dict , snake_case_ : Dict , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : int ) -> Optional[int]:
'''simple docstring'''
_start_torch_memory_measurement()
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (64, 64, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def __magic_name__ ( self : Optional[Any] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_start_torch_memory_measurement()
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(snake_case_ )
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type="np" , )
A__ = output.images[0]
assert image.shape == (64, 64, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A__ = torch.Generator(device="cpu" ).manual_seed(0 )
A__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(snake_case_ )
A__ = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(snake_case_ )
A__ = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , )
A__ = output.images[0]
assert image.shape == (256, 256, 3)
A__ = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A__ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def _SCREAMING_SNAKE_CASE ( ) -> List[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 247 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
def lowercase__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Any = tempfile.mkdtemp()
__snake_case : Dict = 8
# DPR tok
__snake_case : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__snake_case : Tuple = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
__snake_case : Dict = os.path.join(__magic_name__ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
__snake_case : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__snake_case : int = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
__snake_case : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__snake_case : Tuple = {"""unk_token""": """<unk>"""}
__snake_case : Union[str, Any] = os.path.join(self.tmpdirname , """bart_tokenizer""" )
os.makedirs(__magic_name__ , exist_ok=__magic_name__ )
__snake_case : Optional[int] = os.path.join(__magic_name__ , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
__snake_case : Dict = os.path.join(__magic_name__ , BART_VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__magic_name__ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__magic_name__ ) )
def lowercase__ ( self : Tuple ) -> DPRQuestionEncoderTokenizer:
"""simple docstring"""
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
def lowercase__ ( self : Optional[int] ) -> BartTokenizer:
"""simple docstring"""
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
def lowercase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
@require_tokenizers
def lowercase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
__snake_case : Tuple = os.path.join(self.tmpdirname , """rag_tokenizer""" )
__snake_case : Union[str, Any] = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
__snake_case : int = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
rag_config.save_pretrained(__magic_name__ )
rag_tokenizer.save_pretrained(__magic_name__ )
__snake_case : Tuple = RagTokenizer.from_pretrained(__magic_name__ , config=__magic_name__ )
self.assertIsInstance(new_rag_tokenizer.question_encoder , __magic_name__ )
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
self.assertIsInstance(new_rag_tokenizer.generator , __magic_name__ )
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
def lowercase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__snake_case : List[str] = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
__snake_case : Dict = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
__snake_case : Union[str, Any] = tokenizer(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
@slow
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__snake_case : str = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
__snake_case : Union[str, Any] = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
__snake_case : Union[str, Any] = tokenizer(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 13 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImgaImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
lowercase__: int = KandinskyImgaImgPipeline
lowercase__: Any = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
lowercase__: int = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowercase__: List[Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase__: Any = False
@property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : str ) -> str:
"""simple docstring"""
return 32
@property
def lowercase__ ( self : Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase__ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1_00
@property
def lowercase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__snake_case : str = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def lowercase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
__snake_case : Tuple = MultilingualCLIP(__magic_name__ )
__snake_case : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__snake_case : Tuple = UNetaDConditionModel(**__magic_name__ )
return model
@property
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
__snake_case : int = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : Tuple ) -> str:
"""simple docstring"""
__snake_case : Tuple = self.dummy_text_encoder
__snake_case : Dict = self.dummy_tokenizer
__snake_case : Dict = self.dummy_unet
__snake_case : int = self.dummy_movq
__snake_case : List[Any] = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__snake_case : Dict = DDIMScheduler(**__magic_name__ )
__snake_case : Any = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowercase__ ( self : str , __magic_name__ : str , __magic_name__ : Union[str, Any]=0 ) -> str:
"""simple docstring"""
__snake_case : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__magic_name__ )
# create init_image
__snake_case : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(__magic_name__ ) ).to(__magic_name__ )
__snake_case : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __snake_case : Optional[int] = Image.fromarray(np.uint8(__magic_name__ ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__magic_name__ ).startswith("""mps""" ):
__snake_case : str = torch.manual_seed(__magic_name__ )
else:
__snake_case : str = torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ )
__snake_case : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowercase__ ( self : int ) -> str:
"""simple docstring"""
__snake_case : Dict = """cpu"""
__snake_case : Union[str, Any] = self.get_dummy_components()
__snake_case : List[str] = self.pipeline_class(**__magic_name__ )
__snake_case : Optional[Any] = pipe.to(__magic_name__ )
pipe.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = pipe(**self.get_dummy_inputs(__magic_name__ ) )
__snake_case : List[str] = output.images
__snake_case : Any = pipe(
**self.get_dummy_inputs(__magic_name__ ) , return_dict=__magic_name__ , )[0]
__snake_case : Optional[int] = image[0, -3:, -3:, -1]
__snake_case : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case : int = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def lowercase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
__snake_case : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__snake_case : List[Any] = """A red cartoon frog, 4k"""
__snake_case : str = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(__magic_name__ )
__snake_case : Union[str, Any] = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
__snake_case : Any = pipeline.to(__magic_name__ )
pipeline.set_progress_bar_config(disable=__magic_name__ )
__snake_case : List[str] = torch.Generator(device="""cpu""" ).manual_seed(0 )
__snake_case , __snake_case : Optional[Any] = pipe_prior(
__magic_name__ , generator=__magic_name__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
__snake_case : List[str] = pipeline(
__magic_name__ , image=__magic_name__ , image_embeds=__magic_name__ , negative_image_embeds=__magic_name__ , generator=__magic_name__ , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
__snake_case : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__magic_name__ , __magic_name__ )
| 13 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
A__ = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class DataTrainingArguments:
    task_name: str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
    data_dir: str = field(metadata={"""help""": """Should contain the data files for the task."""} )
    max_seq_length: int = field(
        default=128 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fp16 , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""" , training_args )
# Set seed
set_seed(training_args.seed )
    try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir , tokenizer=tokenizer , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": simple_accuracy(preds , p.label_ids )}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 ) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , data_collator=data_collator , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_master():
            with open(output_eval_file , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
                    logger.info(""" %s = %s""" , key , value )
writer.write("""%s = %s\n""" % (key, value) )
            results.update(result )
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 82 |
def solution(n: int = 10_00) -> int:
    """Return the largest product a * b * c of a Pythagorean triplet with a + b + c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
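        # Derivation (comment added for clarity): from a + b + c = n we get
        # c = n - a - b; substituting into a**2 + b**2 = c**2 and simplifying gives
        # b = n * (n - 2 * a) / (2 * (n - a)). The integer division below may round b,
        # so the c*c == a*a + b*b check re-validates each candidate triplet.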
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
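# Quick sanity check (added, easy to verify by hand): for n = 12 the only
# Pythagorean triplet is (3, 4, 5), so solution(12) == 3 * 4 * 5 == 60.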
if __name__ == "__main__":
print(f"{solution() = }")
| 82 | 1 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
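# Illustrative example (added; the key is invented, not from the original script):
# rename_key("visual_encoder.blocks.0.attn.proj.weight") returns
# "vision_model.encoder.layers.0.self_attn.projection.weight".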
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})
    hf_model = BlipForConditionalGeneration(config).eval()
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()
    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_model.load_state_dict(modified_state_dict)
    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids
    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )
    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()
    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)
    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids
    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))
    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")
    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()
    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value
    hf_itm_model = BlipForImageTextRetrieval(config)
    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35,
    ).input_ids
    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()
    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)
    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127
    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 355 |
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    """Compute the Schur complement of block A in the matrix [[A, B], [B.T, C]]."""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
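    # Background note (added): for the symmetric block matrix M = [[A, B], [B.T, C]]
    # the Schur complement of A is S = C - B.T @ A^{-1} @ B, and it satisfies
    # det(M) = det(A) * det(S); this is the identity exercised by the unit test below.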
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement."
            )
    return mat_c - mat_b.T @ a_inv @ mat_b
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)
    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        # The argument order was lost in the source; swapping b and c (an assumption)
        # restores a call whose row-count check fails as the test name suggests.
        with self.assertRaises(ValueError):
            schur_complement(a, c, b)
    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 276 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def lowercase__ ( self : Tuple ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ :Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def lowercase__ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ :List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def lowercase__ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
lowercase_ :Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(lowercase )
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :Optional[Any] = self.dummy_uncond_unet
lowercase_ :int = DDIMScheduler()
lowercase_ :Any = self.dummy_vq_model
lowercase_ :int = LDMPipeline(unet=lowercase , vqvae=lowercase , scheduler=lowercase )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
lowercase_ :List[Any] = torch.manual_seed(0 )
lowercase_ :List[str] = ldm(generator=lowercase , num_inference_steps=2 , output_type="numpy" ).images
lowercase_ :str = torch.manual_seed(0 )
lowercase_ :str = ldm(generator=lowercase , num_inference_steps=2 , output_type="numpy" , return_dict=lowercase )[0]
lowercase_ :Optional[int] = image[0, -3:, -3:, -1]
lowercase_ :int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase_ :Optional[int] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
lowercase_ :List[str] = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def lowercase__ ( self : List[str] ):
"""simple docstring"""
lowercase_ :int = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
lowercase_ :Dict = torch.manual_seed(0 )
lowercase_ :List[str] = ldm(generator=lowercase , num_inference_steps=5 , output_type="numpy" ).images
lowercase_ :List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase_ :List[Any] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
lowercase_ :int = 1e-2 if torch_device != "mps" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 223 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[Any] =logging.get_logger(__name__)
lowerCAmelCase : Tuple ={
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2_048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 223 | 1 |
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
# Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
# for compatibility with later versions of NVIDIA Megatron-LM.
# The inverse operation is performed inside Megatron-LM to read checkpoints:
# https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
# If param is the weight tensor of the self-attention block, the returned tensor
# will have to be transposed one more time to be read by HuggingFace GPT2.
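    # Illustrative shape walk-through (numbers chosen for this comment, not taken
    # from the script): with num_heads=16, hidden_size=64, num_splits=3 and a
    # checkpoint version >= 2.0, a [16*3*64, D] tensor is viewed as (16, 3, 64, D),
    # transposed to (3, 16, 64, D), then flattened back to [3*16*64, D].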
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}
    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0
    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]
    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings
    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings
    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]
    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")
    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)
        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]
    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings
    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)", )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")
    ds_args = input_state_dict.get("args", None)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"
        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257, n_positions=1024, n_embd=1024, n_layer=24, n_head=16, n_inner=4096, activation_function=activation_function, resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, )
    else:
        config = GPT2Config.from_json_file(args.config_file)
    config.architectures = ["GPT2LMHeadModel"]
    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)
    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 328 |
import requests
from bs4 import BeautifulSoup
def get_citation(base_url: str, params: dict) -> str:
    """Return the citation count shown on a Google Scholar lookup page."""
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
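# Note (added): the "gs_ri"/"gs_fl" CSS classes and the anchor index are tied to
# Google Scholar's current page markup and may break if the site layout changes.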
if __name__ == "__main__":
_UpperCAmelCase = {
'title': (
'Precisely geometry controlled microsupercapacitors for ultrahigh areal '
'capacitance, volumetric capacitance, and energy density'
),
'journal': 'Chem. Mater.',
'volume': 3_0,
'pages': '3979-3990',
'year': 2_0_1_8,
'hl': 'en',
}
print(get_citation('https://scholar.google.com/scholar_lookup', params=params))
| 328 | 1 |
'''simple docstring'''
import math
def sieve(n):
    """Segmented sieve of Eratosthenes: return all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
while low <= n:
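        # Explanatory note (added): each segment [low, high] is sieved using only the
        # base primes found below sqrt(n), so peak memory stays proportional to
        # sqrt(n) rather than n.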
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
| 250 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowerCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params
    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
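# Usage sketch (added for illustration; the model name is an assumption, not from
# this file):
# classifier = pipeline(task="image-classification", model="google/vit-base-patch16-224")
# classifier("cat.png", top_k=3)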
| 268 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    model_type = 'git_vision_model'
    def __init__( self , hidden_size=768 , intermediate_size=3_072 , num_hidden_layers=12 , num_attention_heads=12 , num_channels=3 , image_size=224 , patch_size=16 , hidden_act="quick_gelu" , layer_norm_eps=1E-5 , attention_dropout=0.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
@classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('''model_type''' ) == "git":
            config_dict = config_dict['''vision_config''']
        if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class GitConfig(PretrainedConfig):
    model_type = 'git'
    def __init__( self , vision_config=None , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=6 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_024 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , tie_word_embeddings=False , bos_token_id=101 , eos_token_id=102 , num_image_with_embedding=None , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , pad_token_id=pad_token_id , **kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
        self.vision_config = GitVisionConfig(**vision_config )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
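# Usage sketch (added for illustration; the values are arbitrary):
# vision_cfg = GitVisionConfig(image_size=384)
# config = GitConfig(vision_config=vision_cfg.to_dict(), num_hidden_layers=6)
# config.to_dict()["vision_config"]["image_size"]  # -> 384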
| 11 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
    def __call__(self, audios, **kwargs):
        return super().__call__(audios, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}
    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt")
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -x[0] )
]
return result
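# A short, hypothetical usage sketch for the zero-shot audio classification
# pipeline above (the model id and file name are assumptions, not taken from
# this file):
#
#   from transformers import pipeline
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   preds = classifier("sample.wav", candidate_labels=["dog barking", "vacuum cleaner"])
#   # -> list of {"score": ..., "label": ...} dicts sorted by descending score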
| 11 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """spiece.model"""}
lowercase_ = {
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
lowercase_ = {
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
class a_ ( PreTrainedTokenizer ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''attention_mask''']
UpperCamelCase = []
def __init__( self , A , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , A = None , **A , ) -> None:
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_SCREAMING_SNAKE_CASE = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
_SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A , eos_token=A , unk_token=A , pad_token=A , sep_token=A , mask_token=A , cls_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
_SCREAMING_SNAKE_CASE = vocab_file
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def snake_case_( self ) -> Optional[int]:
return self.sp_model.get_piece_size()
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.__dict__.copy()
_SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , A ) -> Dict:
_SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_SCREAMING_SNAKE_CASE = {}
_SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_( self , A ) -> List[str]:
return self.sp_model.encode(A , out_type=A )
def snake_case_( self , A ) -> Optional[Any]:
return self.sp_model.piece_to_id(A )
def snake_case_( self , A ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.sp_model.IdToPiece(A )
return token
def snake_case_( self , A ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = """"""
_SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(A ) + token
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(A )
_SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(A )
return out_string.strip()
def snake_case_( self , A , A = False , A = None , A = True , **A , ) -> str:
_SCREAMING_SNAKE_CASE = kwargs.pop("""use_source_tokenizer""" , A )
_SCREAMING_SNAKE_CASE = self.convert_ids_to_tokens(A , skip_special_tokens=A )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
_SCREAMING_SNAKE_CASE = []
sub_texts.append(A )
else:
current_sub_text.append(A )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(A ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
_SCREAMING_SNAKE_CASE = re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(A ) )
else:
_SCREAMING_SNAKE_CASE = """""".join(A )
_SCREAMING_SNAKE_CASE = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_SCREAMING_SNAKE_CASE = self.clean_up_tokenization(A )
return clean_text
else:
return text
def snake_case_( self , A , A = None ) -> Tuple[str]:
if not os.path.isdir(A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_SCREAMING_SNAKE_CASE = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , """wb""" ) as fi:
_SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
def snake_case_( self , A , A = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case_( self , A , A = None , A = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
if token_ids_a is None:
return [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
def snake_case_( self , A , A = None ) -> List[int]:
_SCREAMING_SNAKE_CASE = [self.sep_token_id]
_SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
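# Sketch of what the sequence-pair helpers above produce (token ids are
# illustrative): the special-token builder turns ([10, 11], [20, 21]) into
# [cls_id, 10, 11, sep_id, 20, 21, sep_id], and the token-type helper then
# yields [0, 0, 0, 0] + [1, 1, 1]: zeros over segment A including [CLS] and
# its [SEP], ones over segment B including the trailing [SEP].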
| 58 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class snake_case_:
def __init__( self : Dict , UpperCamelCase_ : str , UpperCamelCase_ : Dict=1_3 , UpperCamelCase_ : Union[str, Any]=3_2 , UpperCamelCase_ : str=2 , UpperCamelCase_ : int=3 , UpperCamelCase_ : Any=1_6 , UpperCamelCase_ : int=[1, 2, 1] , UpperCamelCase_ : Optional[int]=[2, 2, 4] , UpperCamelCase_ : Any=2 , UpperCamelCase_ : Any=2.0 , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : int=0.0 , UpperCamelCase_ : Optional[Any]=0.0 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Tuple="gelu" , UpperCamelCase_ : Union[str, Any]=False , UpperCamelCase_ : Any=True , UpperCamelCase_ : List[Any]=0.02 , UpperCamelCase_ : Tuple=1E-5 , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : str=True , UpperCamelCase_ : List[Any]=1_0 , UpperCamelCase_ : Dict=8 , ):
lowerCAmelCase : Union[str, Any] = parent
lowerCAmelCase : int = batch_size
lowerCAmelCase : List[str] = image_size
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : int = num_channels
lowerCAmelCase : Any = embed_dim
lowerCAmelCase : Any = depths
lowerCAmelCase : Any = num_heads
lowerCAmelCase : int = window_size
lowerCAmelCase : List[Any] = mlp_ratio
lowerCAmelCase : int = qkv_bias
lowerCAmelCase : Optional[Any] = hidden_dropout_prob
lowerCAmelCase : str = attention_probs_dropout_prob
lowerCAmelCase : str = drop_path_rate
lowerCAmelCase : Union[str, Any] = hidden_act
lowerCAmelCase : int = use_absolute_embeddings
lowerCAmelCase : Union[str, Any] = patch_norm
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : str = initializer_range
lowerCAmelCase : Optional[int] = is_training
lowerCAmelCase : int = scope
lowerCAmelCase : List[str] = use_labels
lowerCAmelCase : str = type_sequence_label_size
lowerCAmelCase : Union[str, Any] = encoder_stride
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[Any] ):
return SwinvaConfig(
        image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : str , UpperCamelCase_ : Dict ):
lowerCAmelCase : List[str] = SwinvaModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : List[str] = model(UpperCamelCase_ )
lowerCAmelCase : Tuple = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
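        # Why the asserted shape holds: each of the len(depths) - 1 patch-merging
        # stages quarters the token count and doubles the channel dim. With the
        # defaults here (image 32, patch 2, embed_dim 16, depths [1, 2, 1]):
        # 16 * 16 = 256 patches -> 256 // 4**2 = 16 tokens of 16 * 2**2 = 64 channels.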
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : int , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int] ):
lowerCAmelCase : Tuple = SwinvaForMaskedImageModeling(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Dict = model(UpperCamelCase_ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[str] = SwinvaForMaskedImageModeling(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase : int = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : int ):
lowerCAmelCase : List[str] = self.type_sequence_label_size
lowerCAmelCase : Optional[Any] = SwinvaForImageClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = config_and_inputs
lowerCAmelCase : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class snake_case_( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
__UpperCamelCase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
__UpperCamelCase = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : Dict = SwinvaModelTester(self )
lowerCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=3_7 )
def lowerCamelCase__ ( self : Optional[int] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def lowerCamelCase__ ( self : Dict ):
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def lowerCamelCase__ ( self : int ):
pass
def lowerCamelCase__ ( self : List[Any] ):
lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Tuple = model_class(UpperCamelCase_ )
lowerCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
def lowerCamelCase__ ( self : Tuple ):
lowerCAmelCase, lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Optional[Any] = True
for model_class in self.all_model_classes:
lowerCAmelCase : Any = True
lowerCAmelCase : List[str] = False
lowerCAmelCase : int = True
lowerCAmelCase : int = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : str = outputs.attentions
lowerCAmelCase : int = len(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase : Any = True
lowerCAmelCase : Union[str, Any] = config.window_size**2
lowerCAmelCase : int = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Optional[int] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : Dict = outputs.attentions
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase : str = len(UpperCamelCase_ )
# Check attention is always last and order is fine
lowerCAmelCase : Optional[int] = True
lowerCAmelCase : int = True
lowerCAmelCase : Optional[Any] = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Tuple = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
lowerCAmelCase : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase : Union[str, Any] = 2
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase_ ) )
lowerCAmelCase : List[str] = outputs.attentions
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def lowerCamelCase__ ( self : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[Any] ):
lowerCAmelCase : int = model_class(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
with torch.no_grad():
lowerCAmelCase : Union[str, Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
lowerCAmelCase : str = outputs.hidden_states
lowerCAmelCase : List[str] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
# Swinv2 has a different seq_length
lowerCAmelCase : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase : List[str] = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : str = reshaped_hidden_states[0].shape
lowerCAmelCase : Optional[Any] = (
reshaped_hidden_states[0].view(UpperCamelCase_ , UpperCamelCase_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Any = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Tuple = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[Any] ):
lowerCAmelCase, lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Dict = 3
lowerCAmelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase : str = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase : Optional[int] = True
self.check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , (padded_height, padded_width) )
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase_ )
def lowerCamelCase__ ( self : str ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )
@slow
def lowerCamelCase__ ( self : int ):
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase : int = SwinvaModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase, lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Union[str, Any] = _config_zero_init(UpperCamelCase_ )
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(config=UpperCamelCase_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class snake_case_( unittest.TestCase ):
@cached_property
def lowerCamelCase__ ( self : Dict ):
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : Dict ):
lowerCAmelCase : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
UpperCamelCase_ )
lowerCAmelCase : List[Any] = self.default_image_processor
lowerCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
# forward pass
with torch.no_grad():
lowerCAmelCase : Dict = model(**UpperCamelCase_ )
# verify the logits
lowerCAmelCase : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
lowerCAmelCase : Any = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(UpperCamelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 60 | 0 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE ( object ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Tuple=9_9 , UpperCamelCase__ : List[str]=3_2 , UpperCamelCase__ : int=5 , UpperCamelCase__ : str=4 , UpperCamelCase__ : int=3_7 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Tuple=5_1_2 , UpperCamelCase__ : str=1_6 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Tuple=0.0_2 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : List[str]="None" , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : int=None , ):
"""simple docstring"""
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = seq_length
UpperCamelCase = is_training
UpperCamelCase = use_input_mask
UpperCamelCase = use_token_type_ids
UpperCamelCase = use_labels
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = type_sequence_label_size
UpperCamelCase = initializer_range
UpperCamelCase = num_labels
UpperCamelCase = num_choices
UpperCamelCase = relative_attention
UpperCamelCase = position_biased_input
UpperCamelCase = pos_att_type
UpperCamelCase = scope
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase = None
if self.use_input_mask:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
UpperCamelCase = None
if self.use_token_type_ids:
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None
if self.use_labels:
UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : List[str] ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.get_config()
UpperCamelCase = 3_0_0
return config
def A ( self : Optional[int] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def A ( self : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = DebertaModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
UpperCamelCase = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )[0]
UpperCamelCase = model(UpperCamelCase__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def A ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
"""simple docstring"""
UpperCamelCase = DebertaForMaskedLM(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : List[str] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = DebertaForSequenceClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(UpperCamelCase__ )
def A ( self : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = DebertaForTokenClassification(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Any ):
"""simple docstring"""
UpperCamelCase = DebertaForQuestionAnswering(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
UpperCamelCase = model(
UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Tuple ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = DebertaModelTester(self )
UpperCamelCase = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=3_7 )
def A ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*UpperCamelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase__ )
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase__ )
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = DebertaModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='Model not available yet' )
def A ( self : Union[str, Any] ):
"""simple docstring"""
pass
@slow
def A ( self : str ):
"""simple docstring"""
UpperCamelCase = DebertaModel.from_pretrained('microsoft/deberta-base' )
UpperCamelCase = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
# compare the actual values for a slice.
UpperCamelCase = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase__ , atol=1E-4 ) , f"""{output[:, 1:4, 1:4]}""" )
| 370 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
_lowerCamelCase : List[str] = logging.get_logger(__name__)
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetaDModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (3_2, 3_2)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor([1_0] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return (3, 3_2, 3_2)
@property
def A ( self : str ):
"""simple docstring"""
return (3, 3_2, 3_2)
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase = {
'block_out_channels': (3_2, 6_4),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 3_2,
}
UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetaDModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def A ( self : Any ):
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 4
UpperCamelCase = (3_2, 3_2)
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor([1_0] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def A ( self : Tuple ):
"""simple docstring"""
return (4, 3_2, 3_2)
@property
def A ( self : Dict ):
"""simple docstring"""
return (4, 3_2, 3_2)
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = {
'sample_size': 3_2,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (3_2, 6_4),
'attention_head_dim': 3_2,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCamelCase__ )
UpperCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
model.to(UpperCamelCase__ )
UpperCamelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def A ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
model_accelerate.to(UpperCamelCase__ )
model_accelerate.eval()
UpperCamelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCamelCase = noise.to(UpperCamelCase__ )
UpperCamelCase = torch.tensor([1_0] * noise.shape[0] ).to(UpperCamelCase__ )
UpperCamelCase = model_accelerate(UpperCamelCase__ , UpperCamelCase__ )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ , low_cpu_mem_usage=UpperCamelCase__ )
model_normal_load.to(UpperCamelCase__ )
model_normal_load.eval()
UpperCamelCase = model_normal_load(UpperCamelCase__ , UpperCamelCase__ )['sample']
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 )
def A ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(UpperCamelCase__ )
UpperCamelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
UpperCamelCase = noise.to(UpperCamelCase__ )
UpperCamelCase = torch.tensor([1_0] * noise.shape[0] ).to(UpperCamelCase__ )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
UpperCamelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 ) )
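        # The check above is a numerical regression test: a seeded random input is
        # run through the pretrained UNet and a fixed 3x3 output slice is compared
        # against frozen reference values within a small tolerance.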
class SCREAMING_SNAKE_CASE ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = UNetaDModel
_SCREAMING_SNAKE_CASE = """sample"""
@property
def A ( self : str , UpperCamelCase__ : Any=(3_2, 3_2) ):
"""simple docstring"""
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
        UpperCamelCase = torch.tensor(batch_size * [1_0] ).to(dtype=torch.int32 , device=UpperCamelCase__ )  # dtype assumed: int32
return {"sample": noise, "timestep": time_step}
@property
def A ( self : Optional[Any] ):
"""simple docstring"""
return (3, 3_2, 3_2)
@property
def A ( self : Optional[int] ):
"""simple docstring"""
return (3, 3_2, 3_2)
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = {
'block_out_channels': [3_2, 6_4, 6_4, 6_4],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1E-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
UpperCamelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase , UpperCamelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCamelCase__ )
UpperCamelCase = self.dummy_input
UpperCamelCase = floats_tensor((4, 3) + (2_5_6, 2_5_6) ).to(UpperCamelCase__ )
UpperCamelCase = noise
UpperCamelCase = model(**UpperCamelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(UpperCamelCase__ )
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (2_5_6, 2_5_6)
UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_0_9_8_0.7_1_2_9, -2_0_0_2_8.8_5_3_5, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-2 ) )
def A ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(UpperCamelCase__ )
UpperCamelCase = 4
UpperCamelCase = 3
UpperCamelCase = (3_2, 3_2)
UpperCamelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
UpperCamelCase = torch.tensor(batch_size * [1E-4] ).to(UpperCamelCase__ )
with torch.no_grad():
UpperCamelCase = model(UpperCamelCase__ , UpperCamelCase__ ).sample
UpperCamelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-2 ) )
def A ( self : List[str] ):
"""simple docstring"""
pass
| 249 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
lowerCAmelCase__ : List[Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Dict = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Optional[Any] = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase__ : List[str] = {
'''roberta-base''': 5_12,
'''roberta-large''': 5_12,
'''roberta-large-mnli''': 5_12,
'''distilroberta-base''': 5_12,
'''roberta-base-openai-detector''': 5_12,
'''roberta-large-openai-detector''': 5_12,
}
class __snake_case ( PreTrainedTokenizerFast ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = ["""input_ids""", """attention_mask"""]
__lowerCamelCase = RobertaTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , __UpperCamelCase=True , **__UpperCamelCase , ) -> Any:
'''simple docstring'''
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , __UpperCamelCase ) != add_prefix_space:
snake_case__ : List[str] = getattr(__UpperCamelCase , pre_tok_state.pop('type' ) )
snake_case__ : Optional[Any] = add_prefix_space
snake_case__ : List[str] = pre_tok_class(**__UpperCamelCase )
snake_case__ : List[str] = add_prefix_space
snake_case__ : Optional[int] = 'post_processor'
snake_case__ : Union[str, Any] = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
snake_case__ : int = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case__ : Optional[int] = tuple(state['sep'] )
if "cls" in state:
snake_case__ : List[Any] = tuple(state['cls'] )
snake_case__ : Any = False
if state.get('add_prefix_space' , __UpperCamelCase ) != add_prefix_space:
snake_case__ : Tuple = add_prefix_space
snake_case__ : int = True
if state.get('trim_offsets' , __UpperCamelCase ) != trim_offsets:
snake_case__ : int = trim_offsets
snake_case__ : List[Any] = True
if changes_to_apply:
snake_case__ : Dict = getattr(__UpperCamelCase , state.pop('type' ) )
snake_case__ : Union[str, Any] = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
def __a ( self ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __a ( self , __UpperCamelCase ) -> List[Any]:
'''simple docstring'''
snake_case__ : Optional[int] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
snake_case__ : List[str] = value
def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
snake_case__ : Union[str, Any] = kwargs.get('is_split_into_words' , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __a ( self , *__UpperCamelCase , **__UpperCamelCase ) -> BatchEncoding:
'''simple docstring'''
snake_case__ : Union[str, Any] = kwargs.get('is_split_into_words' , __UpperCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
snake_case__ : Any = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
def __a ( self , __UpperCamelCase , __UpperCamelCase=None ) -> Optional[Any]:
'''simple docstring'''
snake_case__ : str = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : List[Any] = [self.sep_token_id]
snake_case__ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
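    # RoBERTa does not use token type ids, so the helper above returns all zeros
    # for single sequences and pairs alike; e.g. for ([10, 11], [20]) the layout
    # <s> 10 11 </s> </s> 20 </s> maps to [0, 0, 0, 0, 0, 0, 0].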
| 143 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ : Optional[Any] = {
'''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Dict = ['''MobileViTFeatureExtractor''']
lowerCAmelCase__ : Optional[int] = ['''MobileViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
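# The module above is a lazy loader: `_import_structure` maps each submodule to
# the names it exports, and `_LazyModule` defers the real import until one of
# those names is first accessed. Hypothetical access pattern (the import path
# is an assumption):
#
#   from transformers.models.mobilevit import MobileViTConfig  # only pulls in configuration_mobilevit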
| 143 | 1 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb : np.ndarray ) -> np.ndarray:
    """simple docstring"""
    r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
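# Sanity check on the luma weights above: 0.2989 + 0.5870 + 0.1140 = 0.9999, so
# a pure white pixel (r = g = b = 255) maps to ~254.97 and the grayscale output
# stays within the original 0-255 range.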
def gray_to_binary( gray : np.ndarray ) -> np.ndarray:
    """simple docstring"""
    return (gray > 127) & (gray <= 255)
def dilation( image : np.ndarray, kernel : np.ndarray ) -> np.ndarray:
    """simple docstring"""
    output = np.zeros_like(image )
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1] ):
        for y in range(image.shape[0] ):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0 )
    return output
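# Tiny worked example (values are illustrative): dilating a single set pixel
# with a 3x3 cross grows it into a plus shape.
#
#   img = np.zeros((3, 3))
#   img[1, 1] = 1
#   dilation(img, np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]))
#   # -> [[0, 1, 0], [1, 1, 1], [0, 1, 0]]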
if __name__ == "__main__":
# read original image
__snake_case : List[str] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
__snake_case : Any = np.array(Image.open(lena_path))
# kernel to be applied
__snake_case : Optional[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__snake_case : List[Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__snake_case : str = Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
| 361 |
'''simple docstring'''
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType( Protocol ):
    '''simple docstring'''
    def process( self , sample : float ) -> float:
        '''simple docstring'''
        return 0.0
def get_bounds( fft_results : np.ndarray, samplerate : int ) -> tuple[int | float, int | float]:
    """simple docstring"""
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest
def show_frequency_response( filter_type : FilterType, samplerate : int ) -> None:
    """simple docstring"""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item ) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs ) )
    fft_db = 20 * np.log10(fft_out )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate )
    plt.ylim(max([-80, bounds[0]] ), min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )
    plt.plot(fft_db )
    plt.show()
def __lowerCamelCase ( __snake_case : FilterType, __snake_case : int ) -> None:
"""simple docstring"""
A__ : List[Any] =512
A__ : List[Any] =[1] + [0] * (size - 1)
A__ : Dict =[filter_type.process(__snake_case ) for item in inputs]
A__ : Union[str, Any] =[0] * (samplerate - size) # zero-padding
outputs += filler
A__ : str =np.angle(np.fft.fft(__snake_case ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(24, samplerate / 2 - 1 )
plt.xlabel("""Frequency (Hz)""" )
plt.xscale("""log""" )
plt.ylim(-2 * pi, 2 * pi )
plt.ylabel("""Phase shift (Radians)""" )
plt.plot(np.unwrap(__snake_case, -2 * pi ) )
plt.show()
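if __name__ == "__main__":
    # Usage sketch (an illustrative addition, not part of the original module):
    # any object with a process(sample: float) -> float method satisfies the
    # FilterType protocol, so a trivial pass-through filter can be plotted.
    # Its impulse response is a delta, giving a flat 0 dB magnitude response.
    class PassThroughFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(PassThroughFilter(), 48000)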
| 136 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class a_ ( unittest.TestCase ):
'''simple docstring'''
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
_SCREAMING_SNAKE_CASE = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(A ) , A )
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(A ) , x.transpose() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A )
self.assertTrue(np.allclose(transpose(A ) , transpose(A ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , transpose(A , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def snake_case_( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A )
self.assertTrue(np.allclose(transpose(A ) , transpose(A ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , transpose(A , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A )
self.assertTrue(np.allclose(transpose(A ) , np.asarray(transpose(A ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A )
self.assertTrue(np.allclose(transpose(A , axes=(1, 2, 0) ) , np.asarray(transpose(A , axes=(1, 2, 0) ) ) ) )
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , np.reshape(A , (4, 3) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(A , (12, 5) ) , np.reshape(A , (12, 5) ) ) )
@require_torch
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , reshape(A , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A )
self.assertTrue(np.allclose(reshape(A , (12, 5) ) , reshape(A , (12, 5) ).numpy() ) )
@require_tf
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , reshape(A , (4, 3) ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A )
self.assertTrue(np.allclose(reshape(A , (12, 5) ) , reshape(A , (12, 5) ).numpy() ) )
@require_flax
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A )
self.assertTrue(np.allclose(reshape(A , (4, 3) ) , np.asarray(reshape(A , (4, 3) ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A )
self.assertTrue(np.allclose(reshape(A , (12, 5) ) , np.asarray(reshape(A , (12, 5) ) ) ) )
def snake_case_( self ) -> Any:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(A ) , np.squeeze(A ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , np.squeeze(A , axis=2 ) ) )
@require_torch
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A )
self.assertTrue(np.allclose(squeeze(A ) , squeeze(A ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = torch.tensor(A )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , squeeze(A , axis=2 ).numpy() ) )
@require_tf
def snake_case_( self ) -> List[str]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A )
self.assertTrue(np.allclose(squeeze(A ) , squeeze(A ).numpy() ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = tf.constant(A )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , squeeze(A , axis=2 ).numpy() ) )
@require_flax
def snake_case_( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A )
self.assertTrue(np.allclose(squeeze(A ) , np.asarray(squeeze(A ) ) ) )
_SCREAMING_SNAKE_CASE = np.random.randn(1 , 4 , 1 , 5 )
_SCREAMING_SNAKE_CASE = jnp.array(A )
self.assertTrue(np.allclose(squeeze(A , axis=2 ) , np.asarray(squeeze(A , axis=2 ) ) ) )
def snake_case_( self ) -> str:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , np.expand_dims(A , axis=1 ) ) )
@require_torch
def snake_case_( self ) -> int:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = torch.tensor(A )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , expand_dims(A , axis=1 ).numpy() ) )
@require_tf
def snake_case_( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = tf.constant(A )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , expand_dims(A , axis=1 ).numpy() ) )
@require_flax
def snake_case_( self ) -> Dict:
_SCREAMING_SNAKE_CASE = np.random.randn(3 , 4 )
_SCREAMING_SNAKE_CASE = jnp.array(A )
self.assertTrue(np.allclose(expand_dims(A , axis=1 ) , np.asarray(expand_dims(A , axis=1 ) ) ) )
| 58 |
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
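# Cross-check sketch (an added illustration, not in the original file): the
# segmented sieve must agree with naive trial division on a small range.
assert sieve(100) == [i for i in range(2, 101) if all(i % d != 0 for d in range(2, i))]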
| 223 | 0 |
"""simple docstring"""
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    result = """"""
    try:
        with open(file_path, """rb""") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = F'''{dat:08b}'''
                result += curr_byte
            return result
    except OSError:
        print("""File not accessible""")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    lexicon.pop(curr_string)
    lexicon[curr_string + """0"""] = last_match_id
    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = """0""" + lexicon[curr_key]
    lexicon[curr_string + """1"""] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    lexicon = {"""0""": """0""", """1""": """1"""}
    result, curr_string = """""", """"""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = """"""
    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"
    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id
    return result
def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)
    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, """wb""") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("""10000000""")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="""big"""))
    except OSError:
        print("""File not accessible""")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
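# Example invocation (illustrative; file names are hypothetical):
#   python lempel_ziv_compress.py source.txt source.lz
# This reads source.txt as a bit string, compresses it with the LZ-style
# lexicon above, prefixes the original file length, and writes the packed
# bytes to source.lz.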
| 56 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]:
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits: tf.Tensor, axis: Optional[int] = None, name: Optional[str] = None) -> tf.Tensor:
    return tf.nn.softmax(logits=logits + 1E-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1E-5, axis=-1):
    # This is a very simplified functional layernorm, designed to duplicate
    # the functionality of PyTorch nn.functional.layer_norm when this is needed to port
    # models in Transformers.
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    # If end_dim or start_dim is negative, count them from the end
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask: tf.Tensor) -> tf.Tensor:
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor: tf.Tensor, embed_dim: int, tensor_name: str = "input_ids") -> None:
    tf.debugging.assert_less(
        tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=(
            F'''The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding '''
            F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
        ), )
def save_attributes_to_hdf5_group(group, name, data) -> None:
    HDF5_OBJECT_HEADER_LIMIT = 6_4512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            """The following attributes cannot be saved to HDF5 file because """
            F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
            F'''bytes: {bad_attributes}''')
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name) -> List[str]:
    if name in group.attrs:
        data = [n.decode("""utf8""") if hasattr(n, """decode""") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("""utf8""") if hasattr(n, """decode""") else n for n in group.attrs["""%s%d""" % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
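if __name__ == "__main__":
    # Minimal self-check (an illustrative addition, not part of the original
    # module): shape_list returns the static dimensions of an eager tensor
    # and accepts NumPy arrays as well.
    assert shape_list(tf.zeros((2, 3))) == [2, 3]
    assert shape_list(np.zeros((4, 5))) == [4, 5]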
| 56 | 1 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """simple docstring"""
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """simple docstring"""
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node: int) -> int:
        """simple docstring"""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node: int) -> None:
        """simple docstring"""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """simple docstring"""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        """simple docstring"""
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(F"""Added edge [{u} - {v}]\nAdded weight: {w}\n""")
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(F"""The total weight of the minimal spanning tree is: {mst_weight}""")
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
    import doctest
    doctest.testmod()
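if __name__ == "__main__":
    # Example run (an illustrative addition, not in the original file): a
    # 3-node triangle keeps its two cheapest edges in the MST, so the
    # reported total weight is 1 + 2 = 3.
    g = Graph(3)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(0, 2, 3)
    g.boruvka()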
| 133 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase_ : Union[str, Any] = data_utils.TransfoXLTokenizer
lowercase_ : int = data_utils.TransfoXLCorpus
lowercase_ : Dict = data_utils
lowercase_ : Dict = data_utils
def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case_ , "rb" ) as fp:
_UpperCAmelCase = pickle.load(snake_case_ , encoding="latin1" )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_UpperCAmelCase = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
print(f"""Save vocabulary to {pytorch_vocab_dump_path}""" )
_UpperCAmelCase = corpus.vocab.__dict__
torch.save(snake_case_ , snake_case_ )
_UpperCAmelCase = corpus.__dict__
corpus_dict_no_vocab.pop("vocab" , snake_case_ )
_UpperCAmelCase = pytorch_dump_folder_path + "/" + CORPUS_NAME
print(f"""Save dataset to {pytorch_dataset_dump_path}""" )
torch.save(snake_case_ , snake_case_ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_UpperCAmelCase = os.path.abspath(snake_case_ )
_UpperCAmelCase = os.path.abspath(snake_case_ )
print(f"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_UpperCAmelCase = TransfoXLConfig()
else:
_UpperCAmelCase = TransfoXLConfig.from_json_file(snake_case_ )
print(f"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = TransfoXLLMHeadModel(snake_case_ )
_UpperCAmelCase = load_tf_weights_in_transfo_xl(snake_case_ , snake_case_ , snake_case_ )
# Save pytorch-model
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
_UpperCAmelCase = os.path.join(snake_case_ , snake_case_ )
print(f"""Save PyTorch model to {os.path.abspath(snake_case_ )}""" )
torch.save(model.state_dict() , snake_case_ )
print(f"""Save configuration file to {os.path.abspath(snake_case_ )}""" )
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
lowercase_ : List[Any] = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
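# Example invocation (illustrative; script name and paths are hypothetical):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-dump \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json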
| 133 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ) -> None:
        '''simple docstring'''
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)
        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)
    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        '''simple docstring'''
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        input_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(input_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
| 364 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
lowerCAmelCase_ = None
lowerCAmelCase_ = {
'''7B''': 1_1_0_0_8,
'''13B''': 1_3_8_2_4,
'''30B''': 1_7_9_2_0,
'''65B''': 2_2_0_1_6,
'''70B''': 2_8_6_7_2,
}
lowerCAmelCase_ = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256) -> int:
    """simple docstring"""
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
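# Worked example (added for illustration): for the 7B model, n = 4096 gives
# int(8 * 4096 / 3) = 10922, which rounds up to the next multiple of 256, so
# compute_intermediate_size(4096) == 11008, matching the 7B entry in the
# intermediate-size table above.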
def read_json(path) -> Optional[Any]:
    """simple docstring"""
    with open(path, '''r''') as f:
        return json.load(f)
def write_json(text, path) -> None:
    """simple docstring"""
    with open(path, '''w''') as f:
        json.dump(text, f)
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True ) -> Optional[Any]:
"""simple docstring"""
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ : int = os.path.join(_UpperCamelCase , '''tmp''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ : Dict = read_json(os.path.join(_UpperCamelCase , '''params.json''' ) )
snake_case_ : Tuple = NUM_SHARDS[model_size]
snake_case_ : Optional[Any] = params['''n_layers''']
snake_case_ : int = params['''n_heads''']
snake_case_ : Dict = n_heads // num_shards
snake_case_ : List[Any] = params['''dim''']
snake_case_ : str = dim // n_heads
snake_case_ : Any = 10_000.0
snake_case_ : Any = 1.0 / (base ** (torch.arange(0 , _UpperCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
snake_case_ : Optional[Any] = params['''n_kv_heads'''] # for GQA / MQA
snake_case_ : Optional[Any] = n_heads_per_shard // num_key_value_heads
snake_case_ : List[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
snake_case_ : str = n_heads
snake_case_ : Optional[int] = n_heads_per_shard
snake_case_ : str = dim
# permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
snake_case_ : Optional[Any] = torch.load(os.path.join(_UpperCamelCase , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
snake_case_ : Union[str, Any] = [
torch.load(os.path.join(_UpperCamelCase , f'''consolidated.{i:02d}.pth''' ) , map_location='''cpu''' )
for i in range(_UpperCamelCase )
]
snake_case_ : Optional[Any] = 0
snake_case_ : str = {'''weight_map''': {}}
for layer_i in range(_UpperCamelCase ):
snake_case_ : Optional[int] = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case_ : str = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
snake_case_ : Union[str, Any] = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
snake_case_ : int = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for i in range(_UpperCamelCase )
] , dim=0 , ).reshape(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ : Optional[int] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for i in range(_UpperCamelCase )
] , dim=0 , ).reshape(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
snake_case_ : int = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for i in range(_UpperCamelCase )
] , dim=0 , ).reshape(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Optional[int] = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_UpperCamelCase )] , dim=1 )
snake_case_ : Dict = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_UpperCamelCase )] , dim=0 )
snake_case_ : Union[str, Any] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_UpperCamelCase )] , dim=1 )
snake_case_ : Optional[int] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_UpperCamelCase )] , dim=0 )
snake_case_ : str = inv_freq
for k, v in state_dict.items():
snake_case_ : Dict = filename
param_count += v.numel()
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ : Any = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case_ : List[str] = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
snake_case_ : Dict = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(_UpperCamelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(_UpperCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
snake_case_ : List[str] = filename
param_count += v.numel()
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
# Write configs
snake_case_ : int = {'''total_size''': param_count * 2}
write_json(_UpperCamelCase , os.path.join(_UpperCamelCase , '''pytorch_model.bin.index.json''' ) )
snake_case_ : str = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
snake_case_ : Optional[int] = params['''multiple_of'''] if '''multiple_of''' in params else 256
snake_case_ : Optional[Any] = LlamaConfig(
hidden_size=_UpperCamelCase , intermediate_size=compute_intermediate_size(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=_UpperCamelCase , )
config.save_pretrained(_UpperCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
    snake_case_ : Union[str, Any] = LlamaForCausalLM.from_pretrained(_UpperCamelCase , torch_dtype=torch.float16 , low_cpu_mem_usage=_UpperCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(_UpperCamelCase , safe_serialization=_UpperCamelCase )
shutil.rmtree(_UpperCamelCase )
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> List[str]:
"""simple docstring"""
snake_case_ : Union[str, Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'''Saving a {tokenizer_class.__name__} to {tokenizer_path}.''' )
snake_case_ : Union[str, Any] = tokenizer_class(_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
def lowerCamelCase_ ( ) -> int:
"""simple docstring"""
snake_case_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
parser.add_argument(
'''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
parser.add_argument(
'''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
parser.add_argument('''--safe_serialization''' , type=_UpperCamelCase , help='''Whether or not to save using `safetensors`.''' )
snake_case_ : Dict = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
snake_case_ : Dict = os.path.join(args.input_dir , '''tokenizer.model''' )
write_tokenizer(args.output_dir , _UpperCamelCase )
if __name__ == "__main__":
main()
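# Example invocation (illustrative; script name and paths are hypothetical):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir ./llama-7b-hf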
| 279 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ) -> None:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def A ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE =None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE =random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE =None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
_SCREAMING_SNAKE_CASE =None
if self.use_labels:
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A ( self : str , _a : Optional[Any] , _a : Optional[Any] , _a : int , _a : str , _a : List[Any] , _a : Optional[int] , _a : Any ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerModel(config=_a )
_SCREAMING_SNAKE_CASE ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_SCREAMING_SNAKE_CASE =[input_ids, input_mask]
_SCREAMING_SNAKE_CASE =model(_a )
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A ( self : List[Any] , _a : str , _a : Dict , _a : List[str] , _a : List[Any] , _a : Optional[Any] , _a : int , _a : List[Any] ) -> Optional[int]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =True
_SCREAMING_SNAKE_CASE =TFRoFormerForCausalLM(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A ( self : Tuple , _a : Any , _a : Dict , _a : Optional[Any] , _a : str , _a : List[str] , _a : List[str] , _a : Tuple ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerForMaskedLM(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A ( self : Optional[int] , _a : Optional[int] , _a : Union[str, Any] , _a : str , _a : Tuple , _a : Tuple , _a : Tuple , _a : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =TFRoFormerForSequenceClassification(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A ( self : Any , _a : List[Any] , _a : Dict , _a : List[str] , _a : Any , _a : Tuple , _a : str , _a : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_choices
_SCREAMING_SNAKE_CASE =TFRoFormerForMultipleChoice(config=_a )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE =tf.tile(tf.expand_dims(_a , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A ( self : Tuple , _a : str , _a : Union[str, Any] , _a : Union[str, Any] , _a : Any , _a : List[Any] , _a : Dict , _a : List[str] ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.num_labels
_SCREAMING_SNAKE_CASE =TFRoFormerForTokenClassification(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A ( self : str , _a : Any , _a : str , _a : Optional[Any] , _a : Optional[int] , _a : str , _a : Any , _a : Optional[int] ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerForQuestionAnswering(config=_a )
_SCREAMING_SNAKE_CASE ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_SCREAMING_SNAKE_CASE =model(_a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( A__ , A__ , unittest.TestCase ):
A__ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
A__ = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
A__ = False
A__ = False
def A ( self : Union[str, Any] , _a : List[str] , _a : int , _a : Optional[int] , _a : List[Any] , _a : Dict ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A ( self : Any ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , hidden_size=37 )
def A ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A ( self : Optional[int] ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def A ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def A ( self : Tuple ) -> Dict:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*_a )
def A ( self : List[str] ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def A ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def A ( self : int ) -> Optional[Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def A ( self : Any ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def A ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(_a )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def A ( self : int ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
_SCREAMING_SNAKE_CASE =tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE =model(_a )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE =5_0000
_SCREAMING_SNAKE_CASE =[1, 6, vocab_size]
self.assertEqual(output.shape , _a )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE =tf.constant(
[
[
[-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
[-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
[-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-4 )
@require_tf
class A__ ( unittest.TestCase ):
A__ = 1E-4
def A ( self : Dict ) -> str:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE =emba(input_ids.shape )
_SCREAMING_SNAKE_CASE =tf.constant(
[[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]] )
tf.debugging.assert_near(_a , _a , atol=self.tolerance )
def A ( self : Optional[Any] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =tf.constant(
[
[0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
[0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
[0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
] )
_SCREAMING_SNAKE_CASE =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
_SCREAMING_SNAKE_CASE =emba.weight[:3, :5]
tf.debugging.assert_near(_a , _a , atol=self.tolerance )
@require_tf
class A__ ( unittest.TestCase ):
A__ = 1E-4
def A ( self : Tuple ) -> List[Any]:
'''simple docstring'''
        _SCREAMING_SNAKE_CASE =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        _SCREAMING_SNAKE_CASE =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
_SCREAMING_SNAKE_CASE =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE =embed_positions([2, 16, 768] )[None, None, :, :]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
_a , _a , _a )
_SCREAMING_SNAKE_CASE =tf.constant(
[
[0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
[-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
[-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
[-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
[0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
[3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
] )
_SCREAMING_SNAKE_CASE =tf.constant(
[
[0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
[0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
[1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
[2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
[-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
[-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , _a , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , _a , atol=self.tolerance )
| 47 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """simple docstring"""
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """simple docstring"""
    def __init__(
        self,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        project_dim: int = 512,
        pooler_fn: str = "cls",
        learn_encoder: bool = False,
        use_attention_mask: bool = True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """simple docstring"""
    _keys_to_ignore_on_load_unexpected = [r'''pooler''', r'''logit_scale''']
    _keys_to_ignore_on_load_missing = [r'''position_ids''', r'''predictions.decoder.bias''']
    base_model_prefix = '''roberta'''
    config_class = RobertaSeriesConfig
    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, '''has_pre_transformation''', False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output = outputs['''hidden_states'''][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
| 156 | 0 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_SCREAMING_SNAKE_CASE = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
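# Note (added for illustration): with this pattern, a statement such as
#   from transformers.models.informer import InformerConfig
# is resolved through _LazyModule.__getattr__, so configuration_informer is
# only imported on first attribute access instead of at package import time.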
| 369 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def SCREAMING_SNAKE_CASE__ ( __a , __a=None ):
snake_case_ : Optional[int] = None
if token is not None:
snake_case_ : List[str] = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}
snake_case_ : Union[str, Any] = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
snake_case_ : Optional[int] = requests.get(__a , headers=__a ).json()
snake_case_ : List[str] = {}
try:
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
snake_case_ : Dict = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(__a ):
snake_case_ : Optional[Any] = requests.get(url + f"""&page={i + 2}""" , headers=__a ).json()
job_links.update({job['name']: job['html_url'] for job in result['jobs']} )
return job_links
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
        return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact zip; the API URL only gives a redirect to the real download URL."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count each error and collect the tests that failed with it."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a pytest test id."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Group errors per model and count them."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
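# Sketch of the resulting markdown table (illustrative values, not real output):
# | no. | error | status |
# |-:|:-|:-|
# | 17 | RuntimeError: CUDA out of memory. ... | |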
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
    args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(""" / """)
_SCREAMING_SNAKE_CASE = k[index + len(""" / """) :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, """job_links.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
    counter = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
    most_common = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, """errors.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, """reduced_by_error.txt"""), """w""", encoding="""UTF-8""") as fp:
        fp.write(s1)
with open(os.path.join(args.output_dir, """reduced_by_model.txt"""), """w""", encoding="""UTF-8""") as fp:
        fp.write(s2)
| 88 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        # clean up after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 296 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
"""simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
| 167 | 0 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
__A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
__A : Any = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 352 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list) -> None:
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class TestTheRest(TestCasePlus):
    def run_eval_tester(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        articles = [" New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County."]
        _dump_articles(input_file_name, articles)
        score_path = str(Path(self.get_auto_remove_tmp_dir()) / "scores.json")
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'\n            run_eval_search.py\n            {model}\n            {input_file_name}\n            {output_file_name}\n            --score_path {score_path}\n            --task {task}\n            --num_beams 2\n            --length_penalty 2.0\n        '.split()
        with patch.object(sys, "argv", testargs):
            run_generate()
            assert Path(output_file_name).exists()
            # os.remove(Path(output_file_name))

    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)

    @parameterized.expand([BART_TINY, MBART_TINY])
    @slow
    def test_run_eval_slow(self, model):
        self.run_eval_tester(model)

    @parameterized.expand([T5_TINY, MBART_TINY])
    @slow
    def test_run_eval_search(self, model):
        input_file_name = Path(self.get_auto_remove_tmp_dir()) / "utest_input.source"
        output_file_name = input_file_name.parent / "utest_output.txt"
        assert not output_file_name.exists()
        text = {
            "en": ["Machine learning is great, isn't it?", "I like to eat bananas", "Tomorrow is another great day!"],
            "de": [
                "Maschinelles Lernen ist großartig, oder?",
                "Ich esse gerne Bananen",
                "Morgen ist wieder ein toller Tag!",
            ],
        }
        tmp_dir = Path(self.get_auto_remove_tmp_dir())
        score_path = str(tmp_dir / "scores.json")
        reference_path = str(tmp_dir / "val.target")
        _dump_articles(input_file_name, text["en"])
        _dump_articles(reference_path, text["de"])
        task = "translation_en_to_de" if model == T5_TINY else "summarization"
        testargs = f'\n            run_eval_search.py\n            {model}\n            {str(input_file_name)}\n            {str(output_file_name)}\n            --score_path {score_path}\n            --reference_path {reference_path}\n            --task {task}\n        '.split()
        testargs.extend(["--search", "num_beams=1:2 length_penalty=0.9:1.0"])
        with patch.object(sys, "argv", testargs):
            with CaptureStdout() as cs:
                run_search()
            expected_strings = [" num_beams | length_penalty", model, "Best score args"]
            un_expected_strings = ["Info"]
            if "translation" in task:
                expected_strings.append("bleu")
            else:
                expected_strings.extend(ROUGE_KEYS)
            for w in expected_strings:
                assert w in cs.out
            for w in un_expected_strings:
                assert w not in cs.out
            assert Path(output_file_name).exists()
            os.remove(Path(output_file_name))
| 49 | 0 |
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at ``start`` and record even-sized cuts."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree() -> None:
    """Start the DFS from the root (node 1)."""
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
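    # For the sample tree above this prints 2 -- the answer to the classic
    # "Even Tree" problem for this input. That expected value is an assumption
    # based on the standard problem, not something asserted in this file.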
| 90 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}
logger = logging.get_logger(__name__)
class PegasusTokenizer(PreTrainedTokenizer):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , mask_token="<mask_2>" , mask_token_sent="<mask_1>" , additional_special_tokens=None , offset=103 , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
        self.offset = offset
        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )
            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]
            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token, unk_token=unk_token, mask_token=mask_token, pad_token=pad_token, mask_token_sent=mask_token_sent, offset=offset, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.mask_token_sent = mask_token_sent
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        # add special tokens to encoder dict
        self.encoder: Dict[int, str] = {
            0: self.pad_token,
            1: self.eos_token,
        }
        if self.mask_token_sent is not None:
            self.encoder.update(
                {
                    2: self.mask_token_sent,
                    3: self.mask_token,
                }
            )
        if self.offset > 0:
            # entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
            # mask_token_sent is already added to list -> so start at 1
            self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1, self.offset - 1)})
        self.decoder: Dict[str, int] = {v: k for k, v in self.encoder.items()}
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + self.offset

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        if token in self.decoder:
            return self.decoder[token]
        elif token in self.added_tokens_decoder:
            return self.added_tokens_decoder[token]
        sp_id = self.sp_model.piece_to_id(token)
        return sp_id + self.offset

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.encoder:
            return self.encoder[index]
        elif index in self.added_tokens_encoder:
            return self.added_tokens_encoder[index]
        else:
            token = self.sp_model.IdToPiece(index - self.offset)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def num_special_tokens_to_add(self, pair=False):
        return 1

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special
        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 233 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 82 |
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")
    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
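# Illustrative usage (an assumption, not exercised in this file): a 1000-step
# cosine schedule whose betas grow towards the end of the trajectory.
#     betas = betas_for_alpha_bar(1000)  # -> tensor of shape (1000,)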
class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, variance_type: str = "fixed_small_log", clip_sample: bool = True, clip_sample_range: Optional[float] = 1.0, prediction_type: str = "epsilon", beta_schedule: str = "squaredcos_cap_v2", ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")
        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())
        self.variance_type = variance_type
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
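    # Note: in the "learned_range" branch above, the network predicts an
    # interpolation coefficient between the posterior variance's lower bound
    # (beta tilde) and upper bound (beta), following Nichol & Dhariwal's
    # "Improved Denoising Diffusion Probabilistic Models" -- hence the
    # (predicted_variance + 1) / 2 rescaling from [-1, 1] to [0, 1].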
    def step(self, model_output, timestep, sample, prev_timestep=None, generator=None, return_dict=True, ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1
        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t, predicted_variance=predicted_variance, prev_timestep=prev_timestep,
            )
            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )
            variance = variance * variance_noise
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps, ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
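    # Minimal denoising-loop sketch (illustrative only; `unet` and `sample` are
    # assumed to come from elsewhere, e.g. a UNet model and initial latents):
    #     scheduler = UnCLIPScheduler()
    #     scheduler.set_timesteps(25)
    #     for t in scheduler.timesteps:
    #         noise_pred = unet(sample, t).sample   # hypothetical model call
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample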
| 82 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilebert"] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 97 |
'''simple docstring'''
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)
    strand_sort(arr, reverse, solution)
    return solution
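# Strand sort runs in O(n^2) time in the worst case (e.g. reverse-sorted input)
# and O(n) in the best case (already sorted input).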
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 174 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = False
super().__init__(__lowerCamelCase , __lowerCamelCase )
_SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self : Tuple , __lowerCamelCase : ImageInput = None , __lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : Union[bool, str, PaddingStrategy] = False , __lowerCamelCase : Union[bool, str, TruncationStrategy] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : int = 0 , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = False , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Union[str, TensorType]] = None , **__lowerCamelCase : Dict , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_SCREAMING_SNAKE_CASE = self.tokenizer
_SCREAMING_SNAKE_CASE = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
return text_encoding
# add pixel_values
_SCREAMING_SNAKE_CASE = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
if text is not None:
_SCREAMING_SNAKE_CASE = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
else:
_SCREAMING_SNAKE_CASE = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCamelCase )
return encoding_image_processor
def lowerCAmelCase_ ( self : str , *__lowerCamelCase : Dict , **__lowerCamelCase : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def lowerCAmelCase_ ( self : List[str] , *__lowerCamelCase : Optional[Any] , **__lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
def lowerCAmelCase_ ( self : List[Any] ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
_SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
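# Hedged usage sketch for the processor pattern above (it mirrors the upstream
# transformers BlipProcessor; the checkpoint name is an assumption and the
# call downloads weights, so the sketch is left commented out):
#
#   from PIL import Image
#   from transformers import BlipProcessor
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.new("RGB", (384, 384)), text="a photo", return_tensors="pt")
#   print(inputs.keys())  # pixel_values plus the usual tokenizer outputs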
| 111 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Gnome sort: walk the list, swapping adjacent out-of-order items and stepping back."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
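# Quick self-checks (assuming the gnome_sort definition above); the sort is
# in-place, so the returned list is the same object that was passed in.
assert gnome_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
assert gnome_sort([]) == []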
| 111 | 1 |
from math import log2


def lowest_set_bit_index(number: int) -> int:
    """Return the index (0-based) of the lowest set bit of `number`; 0 for number == 0."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return 0 if (number == 0) else int(log2(number & -number))
if __name__ == "__main__":
import doctest
doctest.testmod()
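# Worked check (assuming the fixed function above): 36 is 0b100100, so
# 36 & -36 isolates the lowest set bit (0b100 == 4) and log2(4) == 2.
assert 36 & -36 == 4
assert lowest_set_bit_index(36) == 2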
| 338 | import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 338 | 1 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCamelCase_ = False
lowerCamelCase_ = True
lowerCamelCase_ = False
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCamelCase_ = parser.parse_args()
lowerCamelCase_ = {
'image_size': 'sample_size',
'num_res_blocks': 'layers_per_block',
'block_channels': 'block_out_channels',
'down_blocks': 'down_block_types',
'up_blocks': 'up_block_types',
'downscale_freq_shift': 'freq_shift',
'resnet_num_groups': 'norm_num_groups',
'resnet_act_fn': 'act_fn',
'resnet_eps': 'norm_eps',
'num_head_channels': 'attention_head_dim',
}
lowerCamelCase_ = {
'time_steps': 'time_proj',
'mid': 'mid_block',
'downsample_blocks': 'down_blocks',
'upsample_blocks': 'up_blocks',
}
lowerCamelCase_ = '' if has_file(args.repo_path, '''config.json''') else 'unet'
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
lowerCamelCase_ = reader.read()
lowerCamelCase_ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
lowerCamelCase_ = UNetaDModel(**config)
else:
lowerCamelCase_ = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
lowerCamelCase_ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCamelCase_ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCamelCase_ = config[key]
del config[key]
lowerCamelCase_ = [k.replace('''UNetRes''', '''''') for k in config['down_block_types']]
lowerCamelCase_ = [k.replace('''UNetRes''', '''''') for k in config['up_block_types']]
if do_only_weights:
lowerCamelCase_ = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
lowerCamelCase_ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
lowerCamelCase_ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
lowerCamelCase_ = param_value
lowerCamelCase_ = True
if not has_changed:
lowerCamelCase_ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder)) | 367 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train an XGBoost classifier on the Iris dataset and plot its confusion matrix."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main() | 34 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_A : Union[str, Any] = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] = ['PLBartTokenizer']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str = [
'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'PLBartForCausalLM',
'PLBartForConditionalGeneration',
'PLBartForSequenceClassification',
'PLBartModel',
'PLBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
_A : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 142 |
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __SCREAMING_SNAKE_CASE :
@staticmethod
def __lowerCamelCase ( *A : Dict , **A : Optional[int] ) ->Dict:
pass
@is_pipeline_test
@require_vision
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
_UpperCAmelCase : Optional[int] = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self : Any , A : List[str] , A : Tuple , A : List[str] ) ->List[Any]:
lowerCamelCase__ : List[str] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : Union[str, Any] = [
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
]
return object_detector, examples
def __lowerCamelCase ( self : List[Any] , A : Optional[int] , A : Tuple ) ->Optional[Any]:
lowerCamelCase__ : str = object_detector(examples[0] , threshold=0.0 )
lowerCamelCase__ : Union[str, Any] = len(A )
self.assertGreater(A , 0 )
self.assertEqual(
A , [
{
'''score''': ANY(A ),
'''label''': ANY(A ),
'''box''': {'''xmin''': ANY(A ), '''ymin''': ANY(A ), '''xmax''': ANY(A ), '''ymax''': ANY(A )},
}
for i in range(A )
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : Dict ) ->List[Any]:
pass
@require_torch
def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
lowerCamelCase__ : Optional[int] = pipeline(
'''zero-shot-object-detection''' , model='''hf-internal-testing/tiny-random-owlvit-object-detection''' )
lowerCamelCase__ : List[Any] = object_detector(
'''./tests/fixtures/tests_samples/COCO/000000039769.png''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
lowerCamelCase__ : str = object_detector(
[
{
'''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[Any]:
lowerCamelCase__ : Tuple = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : str = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
lowerCamelCase__ : List[Any] = object_detector(
[
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
{
'''image''': '''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''candidate_labels''': ['''cat''', '''remote''', '''couch'''],
},
] , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
@require_tf
@unittest.skip('''Zero Shot Object Detection not implemented in TF''' )
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
pass
@require_torch
@slow
def __lowerCamelCase ( self : Optional[int] ) ->Optional[int]:
lowerCamelCase__ : Optional[Any] = 0.2
lowerCamelCase__ : List[Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : Any = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , threshold=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
@require_torch
@slow
def __lowerCamelCase ( self : Any ) ->str:
lowerCamelCase__ : List[Any] = 2
lowerCamelCase__ : Union[str, Any] = pipeline('''zero-shot-object-detection''' )
lowerCamelCase__ : List[str] = object_detector(
'''http://images.cocodataset.org/val2017/000000039769.jpg''' , candidate_labels=['''cat''', '''remote''', '''couch'''] , top_k=A , )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
| 142 | 1 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

# Alias names below are reconstructed from usage; only the right-hand sides
# are original.
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 63 | import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self : List[str]) -> List[str]:
"""simple docstring"""
        model = FlaxMTaForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 63 | 1 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of the same set.
def check_bipartite_dfs(graph: dict) -> bool:
    """2-colour the graph with DFS; it is bipartite iff no edge joins two same-coloured vertices."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
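# Counter-example (assuming check_bipartite_dfs above): a triangle contains an
# odd cycle, so it cannot be two-coloured and the check returns False.
triangle = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(triangle))  # False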
| 318 |
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (aka >1b parameters, otherwise the quantization may not work as expected).
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    """Configuration for the YOLOS object-detection model."""

    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
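# Hedged usage sketch (assumes the upstream transformers classes this config
# mirrors; instantiation allocates random weights, so it is left commented out):
#
#   from transformers import YolosConfig, YolosForObjectDetection
#   config = YolosConfig(num_detection_tokens=100, num_labels=91)
#   model = YolosForObjectDetection(config)  # randomly initialised detector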
| 310 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowerCamelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :int = "rag"
SCREAMING_SNAKE_CASE__ :List[str] = True
def __init__( self : List[Any] , __a : Optional[Any]=None , __a : str=True , __a : Tuple=None , __a : Dict=None , __a : Optional[int]=None , __a : Optional[int]=None , __a : List[Any]=None , __a : Dict=" / " , __a : int=" // " , __a : Optional[Any]=5 , __a : Dict=300 , __a : Optional[int]=768 , __a : Tuple=8 , __a : Union[str, Any]="wiki_dpr" , __a : Dict="train" , __a : List[Any]="compressed" , __a : str=None , __a : Tuple=None , __a : int=False , __a : str=False , __a : Optional[int]=0.0 , __a : Dict=True , __a : Tuple=False , __a : Dict=False , __a : str=False , __a : str=True , __a : Optional[Any]=None , **__a : Tuple , ) -> Any:
super().__init__(
bos_token_id=__a , pad_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , forced_eos_token_id=__a , is_encoder_decoder=__a , prefix=__a , vocab_size=__a , **__a , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
_UpperCamelCase : Optional[int] = kwargs.pop("question_encoder" )
_UpperCamelCase : str = question_encoder_config.pop("model_type" )
_UpperCamelCase : Tuple = kwargs.pop("generator" )
_UpperCamelCase : str = decoder_config.pop("model_type" )
from ..auto.configuration_auto import AutoConfig
_UpperCamelCase : Union[str, Any] = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : str = AutoConfig.for_model(__a , **__a )
_UpperCamelCase : Optional[int] = reduce_loss
_UpperCamelCase : str = label_smoothing
_UpperCamelCase : int = exclude_bos_score
_UpperCamelCase : List[str] = do_marginalize
_UpperCamelCase : Optional[int] = title_sep
_UpperCamelCase : Optional[int] = doc_sep
_UpperCamelCase : Union[str, Any] = n_docs
_UpperCamelCase : Tuple = max_combined_length
_UpperCamelCase : Union[str, Any] = dataset
_UpperCamelCase : Any = dataset_split
_UpperCamelCase : List[str] = index_name
_UpperCamelCase : int = retrieval_vector_size
_UpperCamelCase : str = retrieval_batch_size
_UpperCamelCase : Dict = passages_path
_UpperCamelCase : str = index_path
_UpperCamelCase : Tuple = use_dummy_dataset
_UpperCamelCase : Union[str, Any] = output_retrieved
_UpperCamelCase : Optional[Any] = do_deduplication
_UpperCamelCase : str = use_cache
if self.forced_eos_token_id is None:
_UpperCamelCase : List[str] = getattr(self.generator , "forced_eos_token_id" , __a )
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Union[str, Any] , __a : PretrainedConfig , __a : PretrainedConfig , **__a : Optional[int] ) -> PretrainedConfig:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **__a )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
_UpperCamelCase : Dict = copy.deepcopy(self.__dict__ )
_UpperCamelCase : List[Any] = self.question_encoder.to_dict()
_UpperCamelCase : Tuple = self.generator.to_dict()
_UpperCamelCase : Any = self.__class__.model_type
return output
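# Hedged usage sketch (assumes the upstream transformers API this row mirrors;
# the DPR/BART sub-config choices are illustrative, not taken from this file):
#
#   from transformers import BartConfig, DPRConfig, RagConfig
#   rag = RagConfig.from_question_encoder_generator_configs(DPRConfig(), BartConfig(), n_docs=5)
#   print(rag.generator.model_type)  # "bart"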
| 310 | 1 |
'''simple docstring'''
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral (Project Euler 28)."""
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        # the four corner values of ring i sum to 4 * odd**2 - 6 * even
        total = total + 4 * odd**2 - 6 * even
    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
| 125 |
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
) -> None:
    """Save max(src_len, tgt_len) for each train/val example so batches can be length-grouped."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file) | 6 | 0 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein edit distance via memoised top-down recursion."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
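# Classic sanity checks (assuming min_distance_up_bottom above):
# "kitten" -> "sitting" takes substitute k->s, substitute e->i, insert g.
assert min_distance_up_bottom("kitten", "sitting") == 3
assert min_distance_up_bottom("", "abc") == 3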
| 238 |
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
snake_case_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline: predicts a per-pixel depth map for an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
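# Hedged usage sketch (assumes the public transformers pipeline API; the
# checkpoint name is an assumption and the call downloads weights, so it is
# left commented out):
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image, built in postprocess above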
| 238 | 1 |
"""simple docstring"""
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: returns the model's hidden states for a text input."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"""
                )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
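# Hedged usage sketch (assumes the public transformers pipeline API; the model
# name is an assumption and the call downloads weights, so it is left
# commented out):
#
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-uncased")
#   features = extractor("Hello world")  # nested list: [batch, tokens, hidden]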
| 100 |
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation (with a 0b prefix)."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
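# Quick checks (assuming the fixed decimal_to_binary above):
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(10) == "0b1010"
assert decimal_to_binary(-7) == "-0b111"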
| 100 | 1 |
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = """autoformer"""
lowercase_ = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : Optional[int] = None , SCREAMING_SNAKE_CASE : str = "student_t" , SCREAMING_SNAKE_CASE : str = "nll" , SCREAMING_SNAKE_CASE : int = 1 , SCREAMING_SNAKE_CASE : List[int] = [1, 2, 3, 4, 5, 6, 7] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : int = 0 , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : Optional[List[int]] = None , SCREAMING_SNAKE_CASE : int = 64 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 2 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : int = 32 , SCREAMING_SNAKE_CASE : str = "gelu" , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : float = 0.1 , SCREAMING_SNAKE_CASE : int = 100 , SCREAMING_SNAKE_CASE : float = 0.02 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : int = 10 , SCREAMING_SNAKE_CASE : int = 25 , SCREAMING_SNAKE_CASE : int = 3 , **SCREAMING_SNAKE_CASE : Any , ):
# time series specific configuration
lowercase__ : Dict = prediction_length
lowercase__ : Dict = context_length if context_length is not None else prediction_length
lowercase__ : Optional[Any] = distribution_output
lowercase__ : List[Any] = loss
lowercase__ : Any = input_size
lowercase__ : Union[str, Any] = num_time_features
lowercase__ : Optional[Any] = lags_sequence
lowercase__ : str = scaling
lowercase__ : str = num_dynamic_real_features
lowercase__ : List[str] = num_static_real_features
lowercase__ : Optional[Any] = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : Tuple = cardinality
else:
lowercase__ : Union[str, Any] = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(SCREAMING_SNAKE_CASE ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
lowercase__ : str = embedding_dimension
else:
lowercase__ : str = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
lowercase__ : Tuple = num_parallel_samples
# Transformer architecture configuration
lowercase__ : Dict = input_size * len(self.lags_sequence ) + self._number_of_features
lowercase__ : List[Any] = d_model
lowercase__ : List[str] = encoder_attention_heads
lowercase__ : Tuple = decoder_attention_heads
lowercase__ : Union[str, Any] = encoder_ffn_dim
lowercase__ : List[Any] = decoder_ffn_dim
lowercase__ : Optional[Any] = encoder_layers
lowercase__ : List[Any] = decoder_layers
lowercase__ : List[Any] = dropout
lowercase__ : List[str] = attention_dropout
lowercase__ : Optional[int] = activation_dropout
lowercase__ : int = encoder_layerdrop
lowercase__ : Tuple = decoder_layerdrop
lowercase__ : List[Any] = activation_function
lowercase__ : Any = init_std
lowercase__ : Dict = use_cache
# Autoformer
lowercase__ : List[str] = label_length
lowercase__ : Optional[int] = moving_average
lowercase__ : int = autocorrelation_factor
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : str ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
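# Hedged usage sketch (assumes the upstream transformers AutoformerConfig this
# row mirrors; the field values are illustrative):
#
#   from transformers import AutoformerConfig
#   config = AutoformerConfig(prediction_length=24, context_length=48, input_size=1)
#   print(config.d_model, config.label_length)  # 64 10 with the defaults above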
| 121 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return TrainCommand(lowerCamelCase__ )
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
@staticmethod
def snake_case ( SCREAMING_SNAKE_CASE : ArgumentParser ):
lowercase__ : Optional[int] = parser.add_parser("train" , help="CLI tool to train a model on a task." )
train_parser.add_argument(
"--train_data" , type=SCREAMING_SNAKE_CASE , required=SCREAMING_SNAKE_CASE , help="path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences." , )
train_parser.add_argument(
"--column_label" , type=SCREAMING_SNAKE_CASE , default=0 , help="Column of the dataset csv file with example labels." )
train_parser.add_argument(
"--column_text" , type=SCREAMING_SNAKE_CASE , default=1 , help="Column of the dataset csv file with example texts." )
train_parser.add_argument(
"--column_id" , type=SCREAMING_SNAKE_CASE , default=2 , help="Column of the dataset csv file with example ids." )
train_parser.add_argument(
"--skip_first_row" , action="store_true" , help="Skip the first row of the csv file (headers)." )
train_parser.add_argument("--validation_data" , type=SCREAMING_SNAKE_CASE , default="" , help="path to validation dataset." )
train_parser.add_argument(
"--validation_split" , type=SCREAMING_SNAKE_CASE , default=0.1 , help="if validation dataset is not provided, fraction of train dataset to use as validation dataset." , )
train_parser.add_argument("--output" , type=SCREAMING_SNAKE_CASE , default="./" , help="path to saved the trained model." )
train_parser.add_argument(
"--task" , type=SCREAMING_SNAKE_CASE , default="text_classification" , help="Task to train the model on." )
train_parser.add_argument(
"--model" , type=SCREAMING_SNAKE_CASE , default="bert-base-uncased" , help="Model's name or path to stored model." )
train_parser.add_argument("--train_batch_size" , type=SCREAMING_SNAKE_CASE , default=32 , help="Batch size for training." )
train_parser.add_argument("--valid_batch_size" , type=SCREAMING_SNAKE_CASE , default=64 , help="Batch size for validation." )
train_parser.add_argument("--learning_rate" , type=SCREAMING_SNAKE_CASE , default=3E-5 , help="Learning rate." )
train_parser.add_argument("--adam_epsilon" , type=SCREAMING_SNAKE_CASE , default=1E-0_8 , help="Epsilon for Adam optimizer." )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE )
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
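# A minimal invocation sketch (assuming the `transformers-cli` entry point wires up this
# command; the csv path and column layout below are hypothetical):
#
#   transformers-cli train \
#       --train_data ./data/train.csv \
#       --column_label 0 --column_text 1 --column_id 2 \
#       --task text_classification --model bert-base-uncased \
#       --output ./trained_model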
| 121 | 1 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """
    Checks whether n is a 9-digit 1-to-9 pandigital number.
    """
    digits = str(n)
    return len(digits) == 9 and set(digits) == set("123456789")


def solution() -> int | None:
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(F"{solution() = }")
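# A quick usage sketch exercising the pandigital check:
#   is_9_pandigital(123456789)  -> True
#   is_9_pandigital(123456780)  -> False (contains a zero)
#   is_9_pandigital(12345678)   -> False (only eight digits)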
| 281 |
import math
def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(
    a: float, max_iter: int = 9999, tolerance: float = 0.00_000_000_000_001
) -> float:
    """
    The square root is approximated using Newton's method.
    """
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
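# Usage sketch: Newton's iteration converges quadratically once close to the root.
#   square_root_iterative(4)   -> 2.0 (up to the tolerance)
#   square_root_iterative(2)   -> 1.4142135623...
#   square_root_iterative(-1)  -> raises ValueError("math domain error")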
| 281 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
super().__init__()
_lowerCamelCase = num_attention_heads
_lowerCamelCase = attention_head_dim
_lowerCamelCase = num_attention_heads * attention_head_dim
_lowerCamelCase = additional_embeddings
_lowerCamelCase = time_embed_dim or inner_dim
_lowerCamelCase = embedding_proj_dim or embedding_dim
_lowerCamelCase = clip_embed_dim or embedding_dim
_lowerCamelCase = Timesteps(lowercase_ , lowercase_ , 0 )
_lowerCamelCase = TimestepEmbedding(lowercase_ , lowercase_ , out_dim=lowercase_ , act_fn=lowercase_ )
_lowerCamelCase = nn.Linear(lowercase_ , lowercase_ )
if embedding_proj_norm_type is None:
_lowerCamelCase = None
elif embedding_proj_norm_type == "layer":
_lowerCamelCase = nn.LayerNorm(lowercase_ )
else:
raise ValueError(F'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' )
_lowerCamelCase = nn.Linear(lowercase_ , lowercase_ )
if encoder_hid_proj_type is None:
_lowerCamelCase = None
elif encoder_hid_proj_type == "linear":
_lowerCamelCase = nn.Linear(lowercase_ , lowercase_ )
else:
raise ValueError(F'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' )
_lowerCamelCase = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowercase_ ) )
if added_emb_type == "prd":
_lowerCamelCase = nn.Parameter(torch.zeros(1 , 1 , lowercase_ ) )
elif added_emb_type is None:
_lowerCamelCase = None
else:
raise ValueError(
F'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' )
_lowerCamelCase = nn.ModuleList(
[
BasicTransformerBlock(
lowercase_ , lowercase_ , lowercase_ , dropout=lowercase_ , activation_fn='gelu' , attention_bias=lowercase_ , )
for d in range(lowercase_ )
] )
if norm_in_type == "layer":
_lowerCamelCase = nn.LayerNorm(lowercase_ )
elif norm_in_type is None:
_lowerCamelCase = None
else:
raise ValueError(F'Unsupported norm_in_type: {norm_in_type}.' )
_lowerCamelCase = nn.LayerNorm(lowercase_ )
_lowerCamelCase = nn.Linear(lowercase_ , lowercase_ )
_lowerCamelCase = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 )
causal_attention_mask.triu_(1 )
_lowerCamelCase = causal_attention_mask[None, ...]
self.register_buffer('causal_attention_mask' , lowercase_ , persistent=lowercase_ )
_lowerCamelCase = nn.Parameter(torch.zeros(1 , lowercase_ ) )
_lowerCamelCase = nn.Parameter(torch.zeros(1 , lowercase_ ) )
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes." )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
_lowerCamelCase = hidden_states.shape[0]
_lowerCamelCase = timestep
if not torch.is_tensor(lowercase_ ):
_lowerCamelCase = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
_lowerCamelCase = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_lowerCamelCase = timesteps * torch.ones(lowercase_ , dtype=timesteps.dtype , device=timesteps.device )
_lowerCamelCase = self.time_proj(lowercase_ )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
_lowerCamelCase = timesteps_projected.to(dtype=self.dtype )
_lowerCamelCase = self.time_embedding(lowercase_ )
if self.embedding_proj_norm is not None:
_lowerCamelCase = self.embedding_proj_norm(lowercase_ )
_lowerCamelCase = self.embedding_proj(lowercase_ )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
_lowerCamelCase = self.encoder_hidden_states_proj(lowercase_ )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set' )
_lowerCamelCase = self.proj_in(lowercase_ )
_lowerCamelCase = self.positional_embedding.to(hidden_states.dtype )
_lowerCamelCase = []
_lowerCamelCase = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowercase_ )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
_lowerCamelCase = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
_lowerCamelCase = hidden_states[:, None, :]
_lowerCamelCase = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
_lowerCamelCase = self.prd_embedding.to(hidden_states.dtype ).expand(lowercase_ , -1 , -1 )
additional_embeds.append(lowercase_ )
_lowerCamelCase = torch.cat(
lowercase_ , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
_lowerCamelCase = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
_lowerCamelCase = F.pad(
lowercase_ , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
_lowerCamelCase = hidden_states + positional_embeddings
if attention_mask is not None:
_lowerCamelCase = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0
_lowerCamelCase = F.pad(lowercase_ , (0, self.additional_embeddings) , value=0.0 )
_lowerCamelCase = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
_lowerCamelCase = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
_lowerCamelCase = self.norm_in(lowercase_ )
for block in self.transformer_blocks:
_lowerCamelCase = block(lowercase_ , attention_mask=lowercase_ )
_lowerCamelCase = self.norm_out(lowercase_ )
if self.prd_embedding is not None:
_lowerCamelCase = hidden_states[:, -1]
else:
_lowerCamelCase = hidden_states[:, additional_embeddings_len:]
_lowerCamelCase = self.proj_to_clip_embeddings(lowercase_ )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowercase_ )
    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
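# A hypothetical instantiation sketch (defaults mirror the @register_to_config signature
# above; shapes are assumptions for a CLIP-style prior with embedding_dim = 768):
#   prior = PriorTransformer()
#   out = prior(
#       hidden_states=torch.randn(2, 768),          # noisy image embedding
#       timestep=10,
#       proj_embedding=torch.randn(2, 768),
#       encoder_hidden_states=torch.randn(2, 77, 768),
#   )
#   out.predicted_image_embedding                    # -> (2, 768) prediction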
| 364 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1_5_3_6,
"""junnyu/roformer_chinese_base""": 1_5_3_6,
"""junnyu/roformer_chinese_char_small""": 5_1_2,
"""junnyu/roformer_chinese_char_base""": 5_1_2,
"""junnyu/roformer_small_discriminator""": 1_2_8,
"""junnyu/roformer_small_generator""": 1_2_8,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
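# Usage sketch (checkpoint name taken from the vocab map above):
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   tokenizer("今天天气非常好。")["input_ids"]   # jieba-based pre-tokenization, then WordPiece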
| 80 | 0 |
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
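# Usage sketch (PipelineTool instances are callable; the image path is hypothetical):
#   from PIL import Image
#   tool = ImageCaptioningTool()
#   caption = tool(Image.open("photo.jpg"))   # -> e.g. "a dog sitting on a couch"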
| 195 |
"""simple docstring"""
import numpy as np
def sigmoid(vector: np.array) -> np.array:
    """
    Applies the sigmoid function 1 / (1 + e^-x) element-wise.
    """
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
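# Usage sketch:
#   sigmoid(np.array([-1.0, 0.0, 2.0]))  -> array([0.26894142, 0.5, 0.88079708])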
| 60 | 0 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 117 |
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
pprint.pprint(response)
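# Each call returns a JSON list of quote objects from the public zenquotes.io API;
# printing with pprint shows the quote text and author. Requires network access.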
| 117 | 1 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| 21 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"{solution() = }")
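# Brute-force cross-check sketch for small targets: a p x q grid contains
# T(p) * T(q) rectangles, where T(n) = n(n+1)/2. For target=18 the 3x2 grid is
# exact (T(3)*T(2) = 6*3 = 18), so solution(18) should return area 6.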
| 21 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'''
),
'''distilbert-base-german-cased''': (
'''https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'''
),
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''distilbert-base-uncased''': 512,
'''distilbert-base-uncased-distilled-squad''': 512,
'''distilbert-base-cased''': 512,
'''distilbert-base-cased-distilled-squad''': 512,
'''distilbert-base-german-cased''': 512,
'''distilbert-base-multilingual-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''distilbert-base-uncased''': {'''do_lower_case''': True},
'''distilbert-base-uncased-distilled-squad''': {'''do_lower_case''': True},
'''distilbert-base-cased''': {'''do_lower_case''': False},
'''distilbert-base-cased-distilled-squad''': {'''do_lower_case''': False},
'''distilbert-base-german-cased''': {'''do_lower_case''': False},
'''distilbert-base-multilingual-cased''': {'''do_lower_case''': False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" DistilBERT tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """
    A class replicating `BertConfig` with additional parameters for pruning/masking.
    """

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
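# Usage sketch (a smaller hypothetical configuration for movement-pruning experiments):
#   config = MaskedBertConfig(num_hidden_layers=6, pruning_method="topK", mask_scale=0.0)
#   config.model_type   # -> "masked_bert"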
"""simple docstring"""
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
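# Example input shape and invocation sketch (file names are hypothetical):
#   benchmark.json:
#     {"benchmarks/inference.py": {"time_ms": {"new": 12.3, "old": 14.1, "diff": -1.8}}}
#   python format_json_to_md.py benchmark.json benchmark.md
# produces a collapsible <details> block with one markdown table per benchmark file.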
| 144 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """simple docstring"""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ), )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index", )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ), )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ), )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")

    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retried while generating.", )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info("Evaluate the following checkpoints: %s", checkpoints)
    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))
        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
_lowercase: Optional[int] = get_args()
main(args)
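# Invocation sketch for end-to-end QA evaluation (data file paths are hypothetical):
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --evaluation_set nq_dev.questions --gold_data_path nq_dev.gold --gold_data_mode qa \
#       --eval_mode e2e --predictions_path preds.txt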
| 227 | 0 |
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
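# Sanity check: a full 360-degree "arc" is the whole circumference.
#   arc_length(360, 10) == 2 * pi * 10   (~62.83)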
| 358 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
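# Round-trip sketch:
#   cmap = create_cipher_map("SECRET")
#   ct = encipher("HELLO WORLD", cmap)
#   decipher(ct, cmap)   -> "HELLO WORLD"  (spaces pass through unchanged)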
| 201 | 0 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """
    Fast polynomial multiplication using a radix-2 fast Fourier transform.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
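# Usage sketch: multiply (1 + 2x + 3x^2) by (4 + 5x).
#   product = FFT([1, 2, 3], [4, 5]).product
#   # expected coefficients: 4, 13, 22, 15  (4 + 13x + 22x^2 + 15x^3),
#   # returned as rounded complex numbers, e.g. (4+0j).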
| 30 |
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 330 | 0 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 358 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"
class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
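# Usage sketch: recorded checksums are compared key-by-key against the expected dict.
#   expected = {"https://host/data.txt": {"num_bytes": 10, "checksum": "abc..."}}
#   verify_checksums(expected, expected)   # passes, logs "All the checksums matched successfully"
#   verify_checksums(expected, {})         # raises ExpectedMoreDownloadedFiles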
| 273 | 0 |
from __future__ import annotations
from collections import namedtuple
def electric_power(voltage: float, current: float, power: float) -> tuple:
    """
    Calculates power, voltage or current from the other two values
    (the missing quantity is passed as 0).
    """
    result = namedtuple("result", "name value")
if (voltage, current, power).count(0 ) != 1:
raise ValueError('''Only one argument must be 0''' )
elif power < 0:
raise ValueError(
'''Power cannot be negative in any electrical/electronics system''' )
elif voltage == 0:
return result('''voltage''' , power / current )
elif current == 0:
return result('''current''' , power / voltage )
elif power == 0:
return result('''power''' , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
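# Usage sketch: pass the unknown quantity as 0.
#   electric_power(voltage=0, current=2, power=5)   -> result(name='voltage', value=2.5)
#   electric_power(voltage=2, current=2, power=0)   -> result(name='power', value=4.0)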
| 142 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 5 | 0 |
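A usage sketch for `viterbi` above with the classic healthy/fever HMM. The probability values are illustrative, not from the source.

observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}

print(viterbi(observations, states, start_p, trans_p, emit_p))
# ['Healthy', 'Healthy', 'Fever']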
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[str, List[str], None] = None,
        ignore_files: Union[List[str], None] = None,
        only_modules: bool = True,
    ):
        """Run the doc tests for every file in `directory` matching the given identifiers."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_files_without_identifiers(self):
        directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_files(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 370 |
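A minimal standalone sketch of the pattern the test class above relies on: build a doctest suite from one module's docstrings and run it. `statistics` is just an illustrative stdlib module whose docstrings include doctest examples.

import doctest
import statistics
import unittest

suite = doctest.DocTestSuite(statistics)
runner = unittest.TextTestRunner()
result = runner.run(suite)
print(result.failures)  # expect [] when all docstring examples pass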
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
| 173 | 0 |
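The try/except guard above is a general pattern for soft dependencies: the real classes are exported only when the optional packages are installed, and dummy objects take their place otherwise. A minimal standalone sketch of the same pattern; all names here are illustrative, not from the source.

import importlib.util


class OptionalDependencyNotAvailable(BaseException):
    """Raised internally to signal that a soft dependency is missing."""


def is_example_available() -> bool:
    # Illustrative availability check for a hypothetical package.
    return importlib.util.find_spec("example_package") is not None


try:
    if not is_example_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    ExamplePipeline = None  # real libraries substitute a dummy object that errors on use
else:
    from example_package import ExamplePipeline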
import argparse

import torch
from torch import nn

from transformers import M2M100Config, M2M100ForConditionalGeneration


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no equivalent in the HF model."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Build an lm_head linear layer that shares weights with the embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_m2m100_checkpoint_from_disk(checkpoint_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"] or m2m_100["cfg"]["model"]
    state_dict = m2m_100["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    config = M2M100Config(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
    )

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = M2M100ForConditionalGeneration(config)
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_m2m100_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 31 |
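A hypothetical invocation of the conversion script above; the script filename and both paths are placeholders.

# From the command line (placeholder paths):
#   python convert_m2m100_original_checkpoint_to_pytorch.py ./model.pt ./m2m100-converted
# Or programmatically, given a local fairseq checkpoint:
model = convert_fairseq_m2m100_checkpoint_from_disk("./model.pt")
model.save_pretrained("./m2m100-converted")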
def remove_digit(num: int) -> int:
    """
    Return the largest number that can be obtained by deleting exactly one
    digit from the absolute value of ``num``.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # One copy of the digits per position, each with that position removed.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
| 2 | 0 |
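Worked examples for `remove_digit` above, computed by hand.

assert remove_digit(152) == 52    # candidates: 52, 12, 15
assert remove_digit(6385) == 685  # candidates: 385, 685, 635, 638
assert remove_digit(-11) == 1     # abs(-11) = 11 -> candidates: 1, 1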
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class that combines CLIP image and text embeddings into the format
    expected by the decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 360 |
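A minimal smoke test for the class above, assuming a diffusers checkout where the relative imports resolve. The dimensions are small illustrative values, not defaults from the source.

model = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=32,
    time_embed_dim=64,
    cross_attention_dim=16,
)
hidden_states, time_embeddings = model(
    image_embeddings=torch.randn(2, 32),
    prompt_embeds=torch.randn(2, 32),
    text_encoder_hidden_states=torch.randn(2, 7, 32),
    do_classifier_free_guidance=False,
)
print(hidden_states.shape)    # torch.Size([2, 11, 16]): 4 extra tokens + 7 text tokens
print(time_embeddings.shape)  # torch.Size([2, 64])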
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock

    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtin should always be mocked even if they're not in the globals
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
| 269 | 0 |
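For contrast, a minimal sketch of what `patch_submodule` automates, using plain attribute swapping. This is illustrative only: the real helper also walks dotted attribute chains like "os.path.join" and wraps intermediate modules in `_PatchedModuleObj`.

import contextlib
import os


@contextlib.contextmanager
def swap_attr(obj, name, new):
    """Temporarily replace ``obj.name`` with ``new``, restoring it on exit."""
    old = getattr(obj, name)
    setattr(obj, name, new)
    try:
        yield
    finally:
        setattr(obj, name, old)


original_join = os.path.join
with swap_attr(os.path, "join", "__mock_join__"):
    assert os.path.join == "__mock_join__"
assert os.path.join is original_join  # back to normal after the patch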