code
stringlengths 87
55.2k
| code_codestyle
int64 0
349
| style_context
stringlengths 135
49.1k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : int = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Dict = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
__lowercase : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 318
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCAmelCase__ (cls , A , **A ):
lowerCamelCase_ : Optional[Any] = kwargs.pop('''config''' , A )
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , A )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_, lowerCamelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A , **A )
lowerCamelCase_ : Tuple = config_dict.get('''feature_extractor_type''' , A )
lowerCamelCase_ : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowerCamelCase_ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A ):
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(A , **A )
# It could be in `config.feature_extractor_type``
lowerCamelCase_ : Union[str, Any] = getattr(A , '''feature_extractor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ : Optional[int] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ : Any = feature_extractor_class_from_name(A )
lowerCamelCase_ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase_ : Optional[Any] = feature_extractor_class is not None or type(A ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ : int = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowerCamelCase_ : Any = get_class_from_dynamic_module(
A , A , **A )
lowerCamelCase_ : List[Any] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(A )]
return feature_extractor_class.from_dict(A , **A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase__ (A , A ):
FEATURE_EXTRACTOR_MAPPING.register(A , A )
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase = 10 , _lowercase = 1_000 , _lowercase = True ) -> int:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
return min_val if option else max_val
def lowercase_ ( _lowercase , _lowercase ) -> int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> None:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
def answer(_lowercase ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
lowerCamelCase_ : Any = lower
lowerCamelCase_ : int = higher
lowerCamelCase_ : int = []
while True:
lowerCamelCase_ : Optional[int] = get_avg(_lowercase , _lowercase )
last_numbers.append(_lowercase )
if answer(_lowercase ) == "low":
lowerCamelCase_ : Union[str, Any] = number
elif answer(_lowercase ) == "high":
lowerCamelCase_ : Any = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def lowercase_ ( ) -> None:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = int(input('''Enter lower value : ''' ).strip() )
lowerCamelCase_ : List[str] = int(input('''Enter high value : ''' ).strip() )
lowerCamelCase_ : List[Any] = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
| 318
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __lowercase :
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase__ (self ):
if self.train_file is not None:
lowerCamelCase_ : Optional[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowercase :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__(self , A ):
lowerCamelCase_ : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ : str = [feature.pop(A ) for feature in features]
lowerCamelCase_ : Any = len(A )
lowerCamelCase_ : List[Any] = len(features[0]['''input_ids'''] )
lowerCamelCase_ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
lowerCamelCase_ : str = list(chain(*A ) )
lowerCamelCase_ : Any = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase_ : int = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ : Tuple = torch.tensor(A , dtype=torch.intaa )
return batch
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowercase ):
lowerCamelCase_ : Tuple = [[context] * 4 for context in examples[context_name]]
lowerCamelCase_ : List[Any] = examples[question_header_name]
lowerCamelCase_ : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase_ : Optional[Any] = list(chain(*_lowercase ) )
lowerCamelCase_ : List[Any] = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase_ : List[str] = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowercase ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = eval_predictions
lowerCamelCase_ : Any = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 318
|
'''simple docstring'''
from __future__ import annotations
import time
__lowercase : List[Any] = list[tuple[int, int]]
__lowercase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowercase :
def __init__(self , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = pos_x
lowerCamelCase_ : List[str] = pos_y
lowerCamelCase_ : List[Any] = (pos_y, pos_x)
lowerCamelCase_ : List[str] = goal_x
lowerCamelCase_ : Union[str, Any] = goal_y
lowerCamelCase_ : int = parent
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Union[str, Any] = [self.start]
lowerCamelCase_ : List[str] = False
def UpperCAmelCase__ (self ):
while self.node_queue:
lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase_ : List[str] = True
return self.retrace_path(A )
lowerCamelCase_ : str = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = []
for action in delta:
lowerCamelCase_ : Any = parent.pos_x + action[1]
lowerCamelCase_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = node
lowerCamelCase_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase_ : List[Any] = current_node.parent
path.reverse()
return path
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A )
lowerCamelCase_ : Any = BreadthFirstSearch(A , A )
lowerCamelCase_ : Union[str, Any] = False
def UpperCAmelCase__ (self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCamelCase_ : Optional[Any] = True
return self.retrace_bidirectional_path(
A , A )
lowerCamelCase_ : Optional[int] = current_bwd_node
lowerCamelCase_ : List[str] = current_fwd_node
lowerCamelCase_ : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A )
lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase : List[str] = (0, 0)
__lowercase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase : Tuple = time.time()
__lowercase : int = BreadthFirstSearch(init, goal)
__lowercase : Dict = bfs.search()
__lowercase : Dict = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase : int = time.time()
__lowercase : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
__lowercase : Any = bd_bfs.search()
__lowercase : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 318
| 1
|
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
__lowercase : List[str] = float('''nan''')
class __lowercase :
def __init__(self , A ):
lowerCamelCase_ : Optional[Any] = sys.stdout
lowerCamelCase_ : int = open(A , '''a''' )
def __getattr__(self , A ):
return getattr(self.stdout , A )
def UpperCAmelCase__ (self , A ):
self.stdout.write(A )
# strip tqdm codes
self.file.write(re.sub(R'''^.*\r''' , '''''' , A , 0 , re.M ) )
def lowercase_ ( _lowercase=80 , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = []
# deal with critical env vars
lowerCamelCase_ : List[Any] = ['''CUDA_VISIBLE_DEVICES''']
for key in env_keys:
lowerCamelCase_ : str = os.environ.get(_lowercase , _lowercase )
if val is not None:
cmd.append(F"""{key}={val}""" )
# python executable (not always needed if the script is executable)
lowerCamelCase_ : Optional[Any] = sys.executable if full_python_path else sys.executable.split('''/''' )[-1]
cmd.append(_lowercase )
# now the normal args
cmd += list(map(shlex.quote , sys.argv ) )
# split up into up to MAX_WIDTH lines with shell multi-line escapes
lowerCamelCase_ : Tuple = []
lowerCamelCase_ : Optional[Any] = ''''''
while len(_lowercase ) > 0:
current_line += F"""{cmd.pop(0 )} """
if len(_lowercase ) == 0 or len(_lowercase ) + len(cmd[0] ) + 1 > max_width - 1:
lines.append(_lowercase )
lowerCamelCase_ : Union[str, Any] = ''''''
return "\\\n".join(_lowercase )
def lowercase_ ( _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ : List[Any] = re.sub(R'''[\\\n]+''' , ''' ''' , args.base_cmd )
# remove --output_dir if any and set our own
lowerCamelCase_ : List[Any] = re.sub('''--output_dir\s+[^\s]+''' , '''''' , args.base_cmd )
args.base_cmd += F""" --output_dir {output_dir}"""
# ensure we have --overwrite_output_dir
lowerCamelCase_ : List[str] = re.sub('''--overwrite_output_dir\s+''' , '''''' , args.base_cmd )
args.base_cmd += " --overwrite_output_dir"
return [sys.executable] + shlex.split(args.base_cmd )
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
if 0:
import random
from time import sleep
sleep(0 )
return dict(
{k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 1_00.2, 55.66_66, 2_22.22_22_22_22] )} , )
lowerCamelCase_ : List[str] = subprocess.run(_lowercase , capture_output=_lowercase , text=_lowercase )
if verbose:
print('''STDOUT''' , result.stdout )
print('''STDERR''' , result.stderr )
# save the streams
lowerCamelCase_ : List[Any] = variation.replace(''' ''' , '''-''' )
with open(Path(_lowercase ) / F"""log.{prefix}.stdout.txt""" , '''w''' ) as f:
f.write(result.stdout )
with open(Path(_lowercase ) / F"""log.{prefix}.stderr.txt""" , '''w''' ) as f:
f.write(result.stderr )
if result.returncode != 0:
if verbose:
print('''failed''' )
return {target_metric_key: nan}
with io.open(F"""{output_dir}/all_results.json""" , '''r''' , encoding='''utf-8''' ) as f:
lowerCamelCase_ : Any = json.load(_lowercase )
# filter out just the keys we want
return {k: v for k, v in metrics.items() if k in metric_keys}
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : str = F"""{id}: {variation:<{longest_variation_len}}"""
lowerCamelCase_ : Optional[int] = F"""{preamble}: """
lowerCamelCase_ : str = set(report_metric_keys + [target_metric_key] )
for i in tqdm(range(_lowercase ) , desc=_lowercase , leave=_lowercase ):
lowerCamelCase_ : Any = process_run_single(
_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
lowerCamelCase_ : Any = single_run_metrics[target_metric_key]
if not math.isnan(_lowercase ):
metrics.append(_lowercase )
results.append(_lowercase )
outcome += "✓"
else:
outcome += "✘"
lowerCamelCase_ : Tuple = F"""\33[2K\r{outcome}"""
if len(_lowercase ) > 0:
lowerCamelCase_ : Dict = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
lowerCamelCase_ : Optional[int] = round(mean_metrics[target_metric_key] , 2 )
lowerCamelCase_ : Dict = F"""{outcome} {mean_target}"""
if len(_lowercase ) > 1:
results_str += F""" {tuple(round(_lowercase , 2 ) for x in results )}"""
print(_lowercase )
lowerCamelCase_ : str = variation
return mean_metrics
else:
print(_lowercase )
return {variation_key: variation, target_metric_key: nan}
def lowercase_ ( ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : List[str] = torch.cuda.get_device_properties(torch.device('''cuda''' ) )
return F"""
Datetime : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}
Software:
transformers: {transformers.__version__}
torch : {torch.__version__}
cuda : {torch.version.cuda}
python : {platform.python_version()}
Hardware:
{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : List[str] = pd.DataFrame(_lowercase )
lowerCamelCase_ : Dict = '''variation'''
lowerCamelCase_ : str = '''diff_%'''
lowerCamelCase_ : Optional[int] = nan
if base_variation is not None and len(df[df[variation_key] == base_variation] ):
# this may still return nan
lowerCamelCase_ : Any = df.loc[df[variation_key] == base_variation][target_metric_key].item()
if math.isnan(_lowercase ):
# as a fallback, use the minimal value as the sentinel
lowerCamelCase_ : List[str] = df.loc[df[target_metric_key] != nan][target_metric_key].min()
# create diff column if possible
if not math.isnan(_lowercase ):
lowerCamelCase_ : int = df.apply(
lambda _lowercase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
if not math.isnan(r[target_metric_key] )
else 0 , axis='''columns''' , )
# re-order columns
lowerCamelCase_ : Optional[Any] = [variation_key, target_metric_key, diff_key, *report_metric_keys]
lowerCamelCase_ : List[str] = df.reindex(_lowercase , axis='''columns''' ) # reorder cols
# capitalize
lowerCamelCase_ : Optional[int] = df.rename(str.capitalize , axis='''columns''' )
# make the cols as narrow as possible
lowerCamelCase_ : Any = df.rename(lambda _lowercase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
lowerCamelCase_ : Optional[int] = df.rename(lambda _lowercase : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
lowerCamelCase_ : Optional[Any] = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
report += ["----------8<-----------------8<--------"]
report += ["*** Results:", df_github.to_markdown(index=_lowercase , floatfmt='''.2f''' )]
report += ["```"]
report += ["*** Setup:", get_versions()]
report += ["*** The benchmark command line was:", get_original_command()]
report += ["```"]
report += ["----------8<-----------------8<--------"]
report += ["*** Results (console):", df_console.to_markdown(index=_lowercase , floatfmt='''.2f''' )]
print('''\n\n'''.join(_lowercase ) )
def lowercase_ ( ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=_lowercase , type=_lowercase , nargs='''+''' , required=_lowercase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=_lowercase , type=_lowercase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=_lowercase , type=_lowercase , required=_lowercase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=_lowercase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=_lowercase , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=_lowercase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=_lowercase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
lowerCamelCase_ : Any = parser.parse_args()
lowerCamelCase_ : List[Any] = args.output_dir
Path(_lowercase ).mkdir(exist_ok=_lowercase )
lowerCamelCase_ : int = get_base_command(_lowercase , _lowercase )
# split each dimension into its --foo variations
lowerCamelCase_ : Union[str, Any] = [list(map(str.strip , re.split(R'''\|''' , _lowercase ) ) ) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
lowerCamelCase_ : List[str] = list(map(str.strip , map(''' '''.join , itertools.product(*_lowercase ) ) ) )
lowerCamelCase_ : List[str] = max(len(_lowercase ) for x in variations )
# split wanted keys
lowerCamelCase_ : str = args.report_metric_keys.split()
# capture prints into a log file for convenience
lowerCamelCase_ : Any = F"""benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt"""
print(F"""\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt""" )
print(F"""and this script's output is also piped into {report_fn}""" )
lowerCamelCase_ : Optional[Any] = Tee(_lowercase )
print(F"""\n*** Running {len(_lowercase )} benchmarks:""" )
print(F"""Base command: {" ".join(_lowercase )}""" )
lowerCamelCase_ : List[Any] = '''variation'''
lowerCamelCase_ : Dict = []
for id, variation in enumerate(tqdm(_lowercase , desc='''Total completion: ''' , leave=_lowercase ) ):
lowerCamelCase_ : Dict = base_cmd + variation.split()
results.append(
process_run(
id + 1 , _lowercase , _lowercase , _lowercase , _lowercase , args.target_metric_key , _lowercase , args.repeat_times , _lowercase , args.verbose , ) )
process_results(_lowercase , args.target_metric_key , _lowercase , args.base_variation , _lowercase )
if __name__ == "__main__":
main()
| 318
|
'''simple docstring'''
import numpy as np
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__lowercase : List[str] = logging.getLogger(__name__)
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : str = argparse.ArgumentParser(
description='''Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).''' )
parser.add_argument('''--file_path''' , type=_lowercase , default='''data/dump.txt''' , help='''The path to the data.''' )
parser.add_argument('''--tokenizer_type''' , type=_lowercase , default='''bert''' , choices=['''bert''', '''roberta''', '''gpt2'''] )
parser.add_argument('''--tokenizer_name''' , type=_lowercase , default='''bert-base-uncased''' , help='''The tokenizer to use.''' )
parser.add_argument('''--dump_file''' , type=_lowercase , default='''data/dump''' , help='''The dump file prefix.''' )
lowerCamelCase_ : Tuple = parser.parse_args()
logger.info(F"""Loading Tokenizer ({args.tokenizer_name})""" )
if args.tokenizer_type == "bert":
lowerCamelCase_ : Optional[int] = BertTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ : Union[str, Any] = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
lowerCamelCase_ : Union[str, Any] = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
lowerCamelCase_ : List[Any] = RobertaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ : List[str] = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
lowerCamelCase_ : Optional[int] = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
lowerCamelCase_ : Dict = GPTaTokenizer.from_pretrained(args.tokenizer_name )
lowerCamelCase_ : Dict = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
lowerCamelCase_ : Union[str, Any] = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(F"""Loading text from {args.file_path}""" )
with open(args.file_path , '''r''' , encoding='''utf8''' ) as fp:
lowerCamelCase_ : Union[str, Any] = fp.readlines()
logger.info('''Start encoding''' )
logger.info(F"""{len(_lowercase )} examples to process.""" )
lowerCamelCase_ : Union[str, Any] = []
lowerCamelCase_ : Optional[int] = 0
lowerCamelCase_ : Optional[int] = 10_000
lowerCamelCase_ : List[Any] = time.time()
for text in data:
lowerCamelCase_ : List[str] = F"""{bos} {text.strip()} {sep}"""
lowerCamelCase_ : Any = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
rslt.append(_lowercase )
iter += 1
if iter % interval == 0:
lowerCamelCase_ : Union[str, Any] = time.time()
logger.info(F"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""" )
lowerCamelCase_ : int = time.time()
logger.info('''Finished binarization''' )
logger.info(F"""{len(_lowercase )} examples processed.""" )
lowerCamelCase_ : Tuple = F"""{args.dump_file}.{args.tokenizer_name}.pickle"""
lowerCamelCase_ : Any = tokenizer.vocab_size
if vocab_size < (1 << 16):
lowerCamelCase_ : Optional[Any] = [np.uintaa(_lowercase ) for d in rslt]
else:
lowerCamelCase_ : Dict = [np.intaa(_lowercase ) for d in rslt]
random.shuffle(rslt_ )
logger.info(F"""Dump to {dp_file}""" )
with open(_lowercase , '''wb''' ) as handle:
pickle.dump(rslt_ , _lowercase , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
| 318
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ : Optional[Any] = [144, 192, 240]
lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ : List[str] = [96, 120, 144]
lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ : Any = [64, 80, 96]
lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase_ : Union[str, Any] = 0.05
lowerCamelCase_ : Union[str, Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : Optional[Any] = 512
lowerCamelCase_ : Dict = 16
lowerCamelCase_ : Dict = 21
lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json'''
else:
lowerCamelCase_ : Any = 1_000
lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json'''
lowerCamelCase_ : Optional[Any] = '''huggingface/label-files'''
lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : List[str] = idalabel
lowerCamelCase_ : str = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval()
lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = model(**_lowercase )
lowerCamelCase_ : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : str = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 318
| 1
|
'''simple docstring'''
__lowercase : List[Any] = '''Input must be a string of 8 numbers plus letter'''
__lowercase : Any = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowerCamelCase_ : str = F"""Expected string as input, found {type(_lowercase ).__name__}"""
raise TypeError(_lowercase )
lowerCamelCase_ : Dict = spanish_id.replace('''-''' , '''''' ).upper()
if len(_lowercase ) != 9:
raise ValueError(_lowercase )
try:
lowerCamelCase_ : Union[str, Any] = int(spanish_id_clean[0:8] )
lowerCamelCase_ : Any = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(_lowercase ) from ex
if letter.isdigit():
raise ValueError(_lowercase )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase = 50 ) -> int:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 318
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 318
| 1
|
'''simple docstring'''
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(_lowercase , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def lowercase_ ( _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ : List[Any] = _distribute_shards(**_lowercase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = _split_gen_kwargs(_lowercase , _lowercase )
assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def lowercase_ ( _lowercase , _lowercase ) -> int:
'''simple docstring'''
if expected is RuntimeError:
with pytest.raises(_lowercase ):
_number_of_shards_in_gen_kwargs(_lowercase )
else:
lowerCamelCase_ : Optional[Any] = _number_of_shards_in_gen_kwargs(_lowercase )
assert out == expected
| 318
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318
| 1
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
__lowercase : Dict = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCamelCase : Any = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCamelCase : Optional[int] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCamelCase : Tuple = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : List[str] = ZeroShotClassificationPipeline(
model=A , tokenizer=A , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Dict = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
# No kwarg
lowerCamelCase_ : str = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
lowerCamelCase_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
lowerCamelCase_ : Union[str, Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
self.assertEqual(
A , {'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase_ : int = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
self.assertEqual(
A , {'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
lowerCamelCase_ : List[Any] = classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
self.assertEqual(A , {'''sequence''': ANY(A ), '''labels''': [ANY(A )], '''scores''': [ANY(A )]} )
# https://github.com/huggingface/transformers/issues/13846
lowerCamelCase_ : List[str] = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
self.assertEqual(
A , [
{'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]}
for i in range(1 )
] , )
lowerCamelCase_ : Tuple = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
self.assertEqual(
A , [
{'''sequence''': ANY(A ), '''labels''': [ANY(A ), ANY(A )], '''scores''': [ANY(A ), ANY(A )]}
for i in range(2 )
] , )
with self.assertRaises(A ):
classifier('''''' , candidate_labels='''politics''' )
with self.assertRaises(A ):
classifier(A , candidate_labels='''politics''' )
with self.assertRaises(A ):
classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
with self.assertRaises(A ):
classifier('''Who are you voting for in 2020?''' , candidate_labels=A )
with self.assertRaises(A ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
with self.assertRaises(A ):
classifier(
'''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=A , )
self.run_entailment_id(A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = zero_shot_classifier.model.config
lowerCamelCase_ : Optional[Any] = config.labelaid
lowerCamelCase_ : str = zero_shot_classifier.entailment_id
lowerCamelCase_ : str = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
lowerCamelCase_ : int = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase_ : Dict = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
lowerCamelCase_ : List[str] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
lowerCamelCase_ : int = original_labelaid
self.assertEqual(A , zero_shot_classifier.entailment_id )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_0_0 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
lowerCamelCase_ : Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
lowerCamelCase_ : int = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
lowerCamelCase_ : Union[str, Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
lowerCamelCase_ : Optional[int] = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
lowerCamelCase_ : Optional[int] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.9_76, 0.0_15, 0.0_09],
} , )
lowerCamelCase_ : List[Any] = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=A , )
self.assertEqual(
nested_simplify(A ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 318
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318
| 1
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__lowercase : Optional[int] = False
class __lowercase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Union[str, Any] = '''A painting of a squirrel eating a burger '''
lowerCamelCase_ : int = torch.manual_seed(0 )
lowerCamelCase_ : str = pipe(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(A )
lowerCamelCase_ : str = VersatileDiffusionTextToImagePipeline.from_pretrained(A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : List[Any] = generator.manual_seed(0 )
lowerCamelCase_ : Any = pipe(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : int = '''A painting of a squirrel eating a burger '''
lowerCamelCase_ : Dict = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images
lowerCamelCase_ : Dict = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCamelCase_ : List[Any] = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 318
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318
| 1
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class __lowercase ( _lowercase ):
lowerCamelCase : Union[str, Any] = ["image_processor", "feature_extractor"]
lowerCamelCase : Dict = "TvltImageProcessor"
lowerCamelCase : Optional[int] = "TvltFeatureExtractor"
def __init__(self , A , A ):
super().__init__(image_processor=A , feature_extractor=A )
lowerCamelCase_ : Union[str, Any] = image_processor
lowerCamelCase_ : Union[str, Any] = feature_extractor
def __call__(self , A=None , A=None , A=None , A=None , A=False , A=False , *A , **A , ):
if images is None and audio is None:
raise ValueError('''You need to specify either an `images` or `audio` input to process.''' )
lowerCamelCase_ : Union[str, Any] = None
if images is not None:
lowerCamelCase_ : Optional[int] = self.image_processor(A , mask_pixel=A , *A , **A )
if images_mixed is not None:
lowerCamelCase_ : int = self.image_processor(A , is_mixed=A , *A , **A )
if audio is not None:
lowerCamelCase_ : Dict = self.feature_extractor(
A , *A , sampling_rate=A , mask_audio=A , **A )
lowerCamelCase_ : int = {}
if audio is not None:
output_dict.update(A )
if images is not None:
output_dict.update(A )
if images_mixed_dict is not None:
output_dict.update(A )
return output_dict
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = self.image_processor.model_input_names
lowerCamelCase_ : Any = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 318
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 318
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase : Optional[Any] = logging.get_logger(__name__)
__lowercase : Any = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class __lowercase ( _lowercase , _lowercase ):
lowerCamelCase : Tuple = "resnet"
lowerCamelCase : Optional[int] = ["basic", "bottleneck"]
def __init__(self , A=3 , A=6_4 , A=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , A=[3, 4, 6, 3] , A="bottleneck" , A="relu" , A=False , A=None , A=None , **A , ):
super().__init__(**A )
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {",".join(self.layer_types )}""" )
lowerCamelCase_ : str = num_channels
lowerCamelCase_ : Tuple = embedding_size
lowerCamelCase_ : List[Any] = hidden_sizes
lowerCamelCase_ : Optional[Any] = depths
lowerCamelCase_ : List[Any] = layer_type
lowerCamelCase_ : List[Any] = hidden_act
lowerCamelCase_ : Any = downsample_in_first_stage
lowerCamelCase_ : int = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(A ) + 1 )]
lowerCamelCase_, lowerCamelCase_ : Tuple = get_aligned_output_features_output_indices(
out_features=A , out_indices=A , stage_names=self.stage_names )
class __lowercase ( _lowercase ):
lowerCamelCase : List[str] = version.parse("1.11" )
@property
def UpperCAmelCase__ (self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def UpperCAmelCase__ (self ):
return 1E-3
| 318
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Optional[Any] = logging.get_logger(__name__)
__lowercase : Any = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class __lowercase ( _lowercase ):
lowerCamelCase : List[str] = "xlm"
lowerCamelCase : Optional[Any] = {
"hidden_size": "emb_dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
"n_words": "vocab_size", # For backward compatibility
}
def __init__(self , A=3_0_1_4_5 , A=2_0_4_8 , A=1_2 , A=1_6 , A=0.1 , A=0.1 , A=True , A=False , A=False , A=False , A=1 , A=True , A=5_1_2 , A=2_0_4_8**-0.5 , A=1E-12 , A=0.02 , A=0 , A=1 , A=2 , A=3 , A=5 , A=True , A="first" , A=True , A=None , A=True , A=0.1 , A=5 , A=5 , A=0 , A=0 , A=2 , A=0 , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[int] = emb_dim
lowerCamelCase_ : Optional[Any] = n_layers
lowerCamelCase_ : Any = n_heads
lowerCamelCase_ : Union[str, Any] = dropout
lowerCamelCase_ : str = attention_dropout
lowerCamelCase_ : str = gelu_activation
lowerCamelCase_ : int = sinusoidal_embeddings
lowerCamelCase_ : Optional[Any] = causal
lowerCamelCase_ : Optional[Any] = asm
lowerCamelCase_ : Any = n_langs
lowerCamelCase_ : Union[str, Any] = use_lang_emb
lowerCamelCase_ : Any = layer_norm_eps
lowerCamelCase_ : str = bos_index
lowerCamelCase_ : int = eos_index
lowerCamelCase_ : Tuple = pad_index
lowerCamelCase_ : Union[str, Any] = unk_index
lowerCamelCase_ : Optional[int] = mask_index
lowerCamelCase_ : Dict = is_encoder
lowerCamelCase_ : int = max_position_embeddings
lowerCamelCase_ : List[Any] = embed_init_std
lowerCamelCase_ : List[str] = init_std
lowerCamelCase_ : Dict = summary_type
lowerCamelCase_ : Optional[Any] = summary_use_proj
lowerCamelCase_ : int = summary_activation
lowerCamelCase_ : Dict = summary_proj_to_labels
lowerCamelCase_ : Union[str, Any] = summary_first_dropout
lowerCamelCase_ : Optional[Any] = start_n_top
lowerCamelCase_ : List[Any] = end_n_top
lowerCamelCase_ : List[Any] = mask_token_id
lowerCamelCase_ : Union[str, Any] = lang_id
if "n_words" in kwargs:
lowerCamelCase_ : str = kwargs['''n_words''']
super().__init__(pad_token_id=A , bos_token_id=A , **A )
class __lowercase ( _lowercase ):
@property
def UpperCAmelCase__ (self ):
if self.task == "multiple-choice":
lowerCamelCase_ : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase_ : List[str] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 318
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 318
| 1
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__lowercase : Optional[Any] = ['''\nclass''', '''\ndef''', '''\n#''', '''\n@''', '''\nprint''', '''\nif''']
class __lowercase ( _lowercase ):
def __init__(self , A , A , A=None , A=1 ):
lowerCamelCase_ : Optional[int] = tokenizer
lowerCamelCase_ : Tuple = dataset
lowerCamelCase_ : int = len(A ) if n_tasks is None else n_tasks
lowerCamelCase_ : Union[str, Any] = n_copies
def __iter__(self ):
lowerCamelCase_ : List[str] = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
lowerCamelCase_ : List[str] = self.tokenizer(A , padding=A , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class __lowercase ( _lowercase ):
def __init__(self , A , A , A ):
lowerCamelCase_ : Optional[Any] = start_length
lowerCamelCase_ : Optional[Any] = eof_strings
lowerCamelCase_ : List[str] = tokenizer
def __call__(self , A , A , **A ):
lowerCamelCase_ : Optional[int] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase_ : str = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(A )
def lowercase_ ( _lowercase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = re.split('''(%s)''' % '''|'''.join(_lowercase ) , _lowercase )
# last string should be ""
return "".join(string_list[:-2] )
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=20 , **_lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = defaultdict(_lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_lowercase ) ):
with torch.no_grad():
lowerCamelCase_ : Dict = batch['''ids'''].shape[-1]
lowerCamelCase_ : Any = accelerator.unwrap_model(_lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=_lowercase , **_lowercase )
# each task is generated batch_size times
lowerCamelCase_ : List[Any] = batch['''task_id'''].repeat(_lowercase )
lowerCamelCase_ : Optional[int] = accelerator.pad_across_processes(
_lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
lowerCamelCase_, lowerCamelCase_ : List[Any] = accelerator.gather((generated_tokens, generated_tasks) )
lowerCamelCase_ : int = generated_tokens.cpu().numpy()
lowerCamelCase_ : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_lowercase , _lowercase ):
gen_token_dict[task].append(_lowercase )
lowerCamelCase_ : List[Any] = [[] for _ in range(_lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
lowerCamelCase_ : List[Any] = tokenizer.decode(_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase )
code_gens[task].append(remove_last_block(_lowercase ) )
return code_gens
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = HfArgumentParser(_lowercase )
lowerCamelCase_ : Optional[int] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
lowerCamelCase_ : Dict = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
lowerCamelCase_ : Tuple = '''false'''
if args.num_workers is None:
lowerCamelCase_ : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
lowerCamelCase_ : Optional[int] = Accelerator()
set_seed(args.seed , device_specific=_lowercase )
# Load model and tokenizer
lowerCamelCase_ : int = AutoTokenizer.from_pretrained(args.model_ckpt )
lowerCamelCase_ : Optional[int] = tokenizer.eos_token
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
lowerCamelCase_ : List[Any] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , _lowercase , _lowercase )] ),
}
# Load evaluation dataset and metric
lowerCamelCase_ : Any = load_dataset('''openai_humaneval''' )
lowerCamelCase_ : Tuple = load_metric('''code_eval''' )
lowerCamelCase_ : Tuple = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
lowerCamelCase_ : Optional[int] = args.n_samples // args.batch_size
lowerCamelCase_ : Union[str, Any] = TokenizedDataset(_lowercase , human_eval['''test'''] , n_copies=_lowercase , n_tasks=_lowercase )
# do not confuse args.batch_size, which is actually the num_return_sequences
lowerCamelCase_ : Union[str, Any] = DataLoader(_lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
lowerCamelCase_ : Tuple = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
lowerCamelCase_, lowerCamelCase_ : Tuple = accelerator.prepare(_lowercase , _lowercase )
lowerCamelCase_ : Union[str, Any] = complete_code(
_lowercase , _lowercase , _lowercase , _lowercase , n_tasks=_lowercase , batch_size=args.batch_size , **_lowercase , )
if accelerator.is_main_process:
lowerCamelCase_ : str = []
for task in tqdm(range(_lowercase ) ):
lowerCamelCase_ : Any = human_eval['''test'''][task]['''test''']
lowerCamelCase_ : Tuple = F"""check({human_eval["test"][task]["entry_point"]})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
lowerCamelCase_, lowerCamelCase_ : str = code_eval_metric.compute(
references=_lowercase , predictions=_lowercase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(_lowercase , _lowercase )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 318
|
'''simple docstring'''
from itertools import permutations
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase_ : int = [7, 11, 13, 17]
for i, test in enumerate(_lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( _lowercase = 10 ) -> int:
'''simple docstring'''
return sum(
int(''''''.join(map(_lowercase , _lowercase ) ) )
for num in permutations(range(_lowercase ) )
if is_substring_divisible(_lowercase ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 318
| 1
|
'''simple docstring'''
import operator
def lowercase_ ( _lowercase , _lowercase = False , _lowercase = None ) -> list:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = operator.lt if reverse else operator.gt
lowerCamelCase_ : Optional[int] = solution or []
if not arr:
return solution
lowerCamelCase_ : List[Any] = [arr.pop(0 )]
for i, item in enumerate(_lowercase ):
if _operator(_lowercase , sublist[-1] ):
sublist.append(_lowercase )
arr.pop(_lowercase )
# merging sublist into solution list
if not solution:
solution.extend(_lowercase )
else:
while sublist:
lowerCamelCase_ : List[str] = sublist.pop(0 )
for i, xx in enumerate(_lowercase ):
if not _operator(_lowercase , _lowercase ):
solution.insert(_lowercase , _lowercase )
break
else:
solution.append(_lowercase )
strand_sort(_lowercase , _lowercase , _lowercase )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 318
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class __lowercase :
lowerCamelCase : float
lowerCamelCase : TreeNode | None = None
lowerCamelCase : TreeNode | None = None
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
def is_valid_tree(_lowercase ) -> bool:
if node is None:
return True
if not isinstance(_lowercase , _lowercase ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(_lowercase ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
_lowercase , _lowercase , _lowercase ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , _lowercase , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , _lowercase )
)
return is_binary_search_tree_recursive_check(_lowercase , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 318
| 1
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( _lowercase ):
lowerCamelCase : str = ["image_processor", "tokenizer"]
lowerCamelCase : Tuple = "LayoutLMv2ImageProcessor"
lowerCamelCase : Dict = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__(self , A=None , A=None , **A ):
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A , )
lowerCamelCase_ : Any = kwargs.pop('''feature_extractor''' )
lowerCamelCase_ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(A , A )
def __call__(self , A , A = None , A = None , A = None , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ : Optional[int] = self.image_processor(images=A , return_tensors=A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(A , A ):
lowerCamelCase_ : Tuple = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ : Dict = features['''words''']
lowerCamelCase_ : Dict = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel values
lowerCamelCase_ : List[str] = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ : int = self.get_overflowing_images(A , encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ : Optional[int] = images
return encoded_inputs
def UpperCAmelCase__ (self , A , A ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCamelCase_ : Tuple = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(A ) != len(A ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(A )} and {len(A )}""" )
return images_with_overflow
def UpperCAmelCase__ (self , *A , **A ):
return self.tokenizer.batch_decode(*A , **A )
def UpperCAmelCase__ (self , *A , **A ):
return self.tokenizer.decode(*A , **A )
@property
def UpperCAmelCase__ (self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ (self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A , )
return self.image_processor_class
@property
def UpperCAmelCase__ (self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A , )
return self.image_processor
| 318
|
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 318
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : Optional[Any] = 3
lowerCamelCase_ : List[Any] = (3_2, 3_2)
lowerCamelCase_ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : int = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=A , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : int = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
return CLIPTextModel(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Any = self.dummy_cond_unet_upscale
lowerCamelCase_ : Optional[Any] = DDPMScheduler()
lowerCamelCase_ : List[str] = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCamelCase_ : Optional[int] = self.dummy_vae
lowerCamelCase_ : Tuple = self.dummy_text_encoder
lowerCamelCase_ : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : Union[str, Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Optional[Any] = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Optional[int] = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
lowerCamelCase_ : List[Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Dict = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Dict = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='''np''' , )
lowerCamelCase_ : Union[str, Any] = output.images
lowerCamelCase_ : Dict = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = sd_pipe(
[prompt] , image=A , generator=A , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='''np''' , return_dict=A , )[0]
lowerCamelCase_ : str = image[0, -3:, -3:, -1]
lowerCamelCase_ : str = image_from_tuple[0, -3:, -3:, -1]
lowerCamelCase_ : Tuple = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCamelCase_ : Any = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : str = self.dummy_cond_unet_upscale
lowerCamelCase_ : Optional[Any] = DDPMScheduler()
lowerCamelCase_ : Union[str, Any] = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCamelCase_ : str = self.dummy_vae
lowerCamelCase_ : str = self.dummy_text_encoder
lowerCamelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : List[str] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Dict = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : List[str] = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
lowerCamelCase_ : Union[str, Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : int = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : List[Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='''np''' , )
lowerCamelCase_ : List[Any] = output.images
assert image.shape[0] == 2
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : List[str] = sd_pipe(
[prompt] , image=A , generator=A , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.dummy_cond_unet_upscale
lowerCamelCase_ : int = DDPMScheduler()
lowerCamelCase_ : Optional[Any] = DDIMScheduler(prediction_type='''v_prediction''' )
lowerCamelCase_ : Any = self.dummy_vae
lowerCamelCase_ : Optional[Any] = self.dummy_text_encoder
lowerCamelCase_ : int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : Tuple = Image.fromarray(np.uinta(A ) ).convert('''RGB''' ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
lowerCamelCase_ : int = unet.half()
lowerCamelCase_ : Tuple = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : str = StableDiffusionUpscalePipeline(
unet=A , low_res_scheduler=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , max_noise_level=3_5_0 , )
lowerCamelCase_ : Union[str, Any] = sd_pipe.to(A )
sd_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : int = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = sd_pipe(
[prompt] , image=A , generator=A , num_inference_steps=2 , output_type='''np''' , ).images
lowerCamelCase_ : Any = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCamelCase_ : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat.npy''' )
lowerCamelCase_ : Union[str, Any] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''a cat sitting on a park bench'''
lowerCamelCase_ : int = torch.manual_seed(0 )
lowerCamelCase_ : str = pipe(
prompt=A , image=A , generator=A , output_type='''np''' , )
lowerCamelCase_ : Tuple = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'''
'''/upsampled_cat_fp16.npy''' )
lowerCamelCase_ : List[str] = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Optional[int] = '''a cat sitting on a park bench'''
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , generator=A , output_type='''np''' , )
lowerCamelCase_ : int = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ (self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-upscale/low_res_cat.png''' )
lowerCamelCase_ : Any = '''stabilityai/stable-diffusion-x4-upscaler'''
lowerCamelCase_ : Dict = StableDiffusionUpscalePipeline.from_pretrained(
A , torch_dtype=torch.floataa , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ : Union[str, Any] = '''a cat sitting on a park bench'''
lowerCamelCase_ : Optional[int] = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , generator=A , num_inference_steps=5 , output_type='''np''' , )
lowerCamelCase_ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
| 318
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
__lowercase : Dict = ['''model.decoder.embed_positions.weights''']
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
if "emb" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
lowerCamelCase_ : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
lowerCamelCase_ : Tuple = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
lowerCamelCase_ : Optional[int] = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
lowerCamelCase_ : List[Any] = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
lowerCamelCase_ : str = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
lowerCamelCase_ : Any = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
lowerCamelCase_ : Any = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
lowerCamelCase_ : List[Any] = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
lowerCamelCase_ : Optional[Any] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def lowercase_ ( _lowercase , _lowercase ) -> Tuple[Dict, Dict]:
'''simple docstring'''
lowerCamelCase_ : int = list(state_dict.keys() )
lowerCamelCase_ : Tuple = {}
for key in keys:
lowerCamelCase_ : Optional[int] = state_dict.pop(_lowercase )
lowerCamelCase_ : Dict = rename_keys(_lowercase )
if "in_proj_weight" in key:
# split fused qkv proj
lowerCamelCase_ : Tuple = val[:hidden_size, :]
lowerCamelCase_ : List[str] = val[hidden_size : 2 * hidden_size, :]
lowerCamelCase_ : Any = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
lowerCamelCase_ : int = val
else:
lowerCamelCase_ : Dict = val
return state_dict, enc_dec_proj_state_dict
def lowercase_ ( _lowercase ) -> MusicgenDecoderConfig:
'''simple docstring'''
if checkpoint == "small":
# default config values
lowerCamelCase_ : Tuple = 1_024
lowerCamelCase_ : List[Any] = 24
lowerCamelCase_ : Optional[Any] = 16
elif checkpoint == "medium":
lowerCamelCase_ : Union[str, Any] = 1_536
lowerCamelCase_ : Tuple = 48
lowerCamelCase_ : Dict = 24
elif checkpoint == "large":
lowerCamelCase_ : List[Any] = 2_048
lowerCamelCase_ : Dict = 48
lowerCamelCase_ : int = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
lowerCamelCase_ : str = MusicgenDecoderConfig(
hidden_size=_lowercase , ffn_dim=hidden_size * 4 , num_hidden_layers=_lowercase , num_attention_heads=_lowercase , )
return config
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase=None , _lowercase=None , _lowercase="cpu" ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : List[str] = MusicGen.get_pretrained(_lowercase , device=_lowercase )
lowerCamelCase_ : List[str] = decoder_config_from_checkpoint(_lowercase )
lowerCamelCase_ : str = fairseq_model.lm.state_dict()
lowerCamelCase_, lowerCamelCase_ : str = rename_state_dict(
_lowercase , hidden_size=decoder_config.hidden_size )
lowerCamelCase_ : Any = TaEncoderModel.from_pretrained('''t5-base''' )
lowerCamelCase_ : str = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
lowerCamelCase_ : List[Any] = MusicgenForCausalLM(_lowercase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
lowerCamelCase_, lowerCamelCase_ : List[Any] = decoder.load_state_dict(_lowercase , strict=_lowercase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(_lowercase )
if len(_lowercase ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(_lowercase ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
lowerCamelCase_ : Any = MusicgenForConditionalGeneration(text_encoder=_lowercase , audio_encoder=_lowercase , decoder=_lowercase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(_lowercase )
# check we can do a forward pass
lowerCamelCase_ : List[str] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
lowerCamelCase_ : int = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
lowerCamelCase_ : Any = model(input_ids=_lowercase , decoder_input_ids=_lowercase ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained('''t5-base''' )
lowerCamelCase_ : Any = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
lowerCamelCase_ : Tuple = MusicgenProcessor(feature_extractor=_lowercase , tokenizer=_lowercase )
# set the appropriate bos/pad token ids
lowerCamelCase_ : List[str] = 2_048
lowerCamelCase_ : Union[str, Any] = 2_048
# set other default generation config params
lowerCamelCase_ : List[str] = int(30 * audio_encoder.config.frame_rate )
lowerCamelCase_ : str = True
lowerCamelCase_ : Any = 3.0
if pytorch_dump_folder is not None:
Path(_lowercase ).mkdir(exist_ok=_lowercase )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(_lowercase )
processor.save_pretrained(_lowercase )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(_lowercase )
processor.push_to_hub(_lowercase )
if __name__ == "__main__":
__lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
__lowercase : List[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 318
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : Optional[Any] = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> YolosConfig:
'''simple docstring'''
lowerCamelCase_ : Tuple = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
lowerCamelCase_ : List[str] = 192
lowerCamelCase_ : int = 768
lowerCamelCase_ : Tuple = 12
lowerCamelCase_ : int = 3
lowerCamelCase_ : int = [800, 1_333]
lowerCamelCase_ : List[Any] = False
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ : Tuple = 330
lowerCamelCase_ : Tuple = 14
lowerCamelCase_ : int = 6
lowerCamelCase_ : Optional[Any] = 1_320
elif "yolos_s" in yolos_name:
lowerCamelCase_ : Tuple = 384
lowerCamelCase_ : List[str] = 1_536
lowerCamelCase_ : List[str] = 12
lowerCamelCase_ : Optional[int] = 6
elif "yolos_b" in yolos_name:
lowerCamelCase_ : str = [800, 1_344]
lowerCamelCase_ : str = 91
lowerCamelCase_ : List[Any] = '''huggingface/label-files'''
lowerCamelCase_ : Optional[int] = '''coco-detection-id2label.json'''
lowerCamelCase_ : Any = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : Optional[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : Tuple = idalabel
lowerCamelCase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase , _lowercase = False ) -> str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase_ : Optional[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
lowerCamelCase_ : Any = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase_ : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowerCamelCase_ : Optional[int] = in_proj_bias[: config.hidden_size]
lowerCamelCase_ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase_ : Union[str, Any] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase_ : Any = in_proj_weight[-config.hidden_size :, :]
lowerCamelCase_ : List[Any] = in_proj_bias[-config.hidden_size :]
def lowercase_ ( _lowercase ) -> str:
'''simple docstring'''
if "backbone" in name:
lowerCamelCase_ : Optional[Any] = name.replace('''backbone''' , '''vit''' )
if "cls_token" in name:
lowerCamelCase_ : Dict = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "det_token" in name:
lowerCamelCase_ : Optional[int] = name.replace('''det_token''' , '''embeddings.detection_tokens''' )
if "mid_pos_embed" in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' )
if "pos_embed" in name:
lowerCamelCase_ : Any = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
lowerCamelCase_ : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "blocks" in name:
lowerCamelCase_ : List[str] = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
lowerCamelCase_ : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
lowerCamelCase_ : Any = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
lowerCamelCase_ : Optional[Any] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
lowerCamelCase_ : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
lowerCamelCase_ : Dict = name.replace('''mlp.fc2''' , '''output.dense''' )
if "class_embed" in name:
lowerCamelCase_ : Dict = name.replace('''class_embed''' , '''class_labels_classifier''' )
if "bbox_embed" in name:
lowerCamelCase_ : int = name.replace('''bbox_embed''' , '''bbox_predictor''' )
if "vit.norm" in name:
lowerCamelCase_ : Optional[int] = name.replace('''vit.norm''' , '''vit.layernorm''' )
return name
def lowercase_ ( _lowercase , _lowercase ) -> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if "qkv" in key:
lowerCamelCase_ : Tuple = key.split('''.''' )
lowerCamelCase_ : int = int(key_split[2] )
lowerCamelCase_ : List[Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
lowerCamelCase_ : Any = val[:dim, :]
lowerCamelCase_ : Union[str, Any] = val[
dim : dim * 2, :
]
lowerCamelCase_ : int = val[-dim:, :]
else:
lowerCamelCase_ : Tuple = val[:dim]
lowerCamelCase_ : str = val[dim : dim * 2]
lowerCamelCase_ : List[Any] = val[-dim:]
else:
lowerCamelCase_ : List[str] = val
return orig_state_dict
def lowercase_ ( ) -> torch.Tensor:
'''simple docstring'''
lowerCamelCase_ : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : int = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase = False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = get_yolos_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )['''model''']
# load 🤗 model
lowerCamelCase_ : Tuple = YolosForObjectDetection(_lowercase )
model.eval()
lowerCamelCase_ : str = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by YolosImageProcessor
lowerCamelCase_ : Union[str, Any] = 800 if yolos_name != '''yolos_ti''' else 512
lowerCamelCase_ : str = YolosImageProcessor(format='''coco_detection''' , size=_lowercase )
lowerCamelCase_ : List[str] = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : List[str] = model(**_lowercase )
lowerCamelCase_, lowerCamelCase_ : Dict = outputs.logits, outputs.pred_boxes
lowerCamelCase_, lowerCamelCase_ : Any = None, None
if yolos_name == "yolos_ti":
lowerCamelCase_ : Any = torch.tensor(
[[-39.50_22, -11.98_20, -17.68_88], [-29.95_74, -9.97_69, -17.76_91], [-42.32_81, -20.72_00, -30.62_94]] )
lowerCamelCase_ : str = torch.tensor(
[[0.40_21, 0.08_36, 0.79_79], [0.01_84, 0.26_09, 0.03_64], [0.17_81, 0.20_04, 0.20_95]] )
elif yolos_name == "yolos_s_200_pre":
lowerCamelCase_ : Optional[int] = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] )
lowerCamelCase_ : List[str] = torch.tensor(
[[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] )
elif yolos_name == "yolos_s_300_pre":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[[-36.22_20, -14.43_85, -23.54_57], [-35.69_70, -14.75_83, -21.39_35], [-31.59_39, -13.60_42, -16.80_49]] )
lowerCamelCase_ : Optional[Any] = torch.tensor(
[[0.76_14, 0.23_16, 0.47_28], [0.71_68, 0.44_95, 0.38_55], [0.49_96, 0.14_66, 0.99_96]] )
elif yolos_name == "yolos_s_dWr":
lowerCamelCase_ : List[Any] = torch.tensor(
[[-42.86_68, -24.10_49, -41.16_90], [-34.74_56, -14.12_74, -24.91_94], [-33.78_98, -12.19_46, -25.64_95]] )
lowerCamelCase_ : Tuple = torch.tensor(
[[0.55_87, 0.27_73, 0.06_05], [0.50_04, 0.30_14, 0.99_94], [0.49_99, 0.15_48, 0.99_94]] )
elif yolos_name == "yolos_base":
lowerCamelCase_ : Dict = torch.tensor(
[[-40.60_64, -24.30_84, -32.64_47], [-55.19_90, -30.77_19, -35.58_77], [-51.43_11, -33.35_07, -35.64_62]] )
lowerCamelCase_ : Optional[int] = torch.tensor(
[[0.55_55, 0.27_94, 0.06_55], [0.90_49, 0.26_64, 0.18_94], [0.91_83, 0.19_84, 0.16_35]] )
else:
raise ValueError(F"""Unknown yolos_name: {yolos_name}""" )
assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1e-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : List[Any] = {
'''yolos_ti''': '''yolos-tiny''',
'''yolos_s_200_pre''': '''yolos-small''',
'''yolos_s_300_pre''': '''yolos-small-300''',
'''yolos_s_dWr''': '''yolos-small-dwr''',
'''yolos_base''': '''yolos-base''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : Union[str, Any] = model_mapping[yolos_name]
image_processor.push_to_hub(_lowercase , organization='''hustvl''' )
model.push_to_hub(_lowercase , organization='''hustvl''' )
if __name__ == "__main__":
__lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Tuple = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 318
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 318
| 1
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
__lowercase : str = ['''bert-base-uncased''', '''bert-base-cased''']
__lowercase : Optional[int] = '''hf-internal-testing/tiny-bert-tf-only'''
if is_tf_available():
class __lowercase ( tf.keras.Model ):
def __init__(self , A ):
super().__init__()
lowerCamelCase_ : List[str] = tokenizer
lowerCamelCase_ : str = AutoConfig.from_pretrained(A )
lowerCamelCase_ : Union[str, Any] = TFAutoModel.from_config(A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = self.tokenizer(A )
lowerCamelCase_ : Optional[int] = self.bert(**A )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
BertTokenizer.from_pretrained(A ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
lowerCamelCase_ : Tuple = [TFBertTokenizer.from_pretrained(A ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(A , use_fast_bert_tokenizer=A )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
lowerCamelCase_ : Any = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
lowerCamelCase_ : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def UpperCAmelCase__ (self ):
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ : Optional[int] = tokenizer(A , return_tensors='''tf''' , padding='''longest''' )
lowerCamelCase_ : List[Any] = tf_tokenizer(A )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def UpperCAmelCase__ (self ):
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ : Dict = tf_tokenizer(self.paired_sentences )
lowerCamelCase_ : Tuple = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def UpperCAmelCase__ (self ):
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ : Tuple = tf.function(A )
for test_inputs in (self.test_sentences, self.paired_sentences):
lowerCamelCase_ : Optional[Any] = tf.constant(A )
lowerCamelCase_ : int = compiled_tokenizer(A )
lowerCamelCase_ : Optional[int] = tf_tokenizer(A )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def UpperCAmelCase__ (self ):
for tf_tokenizer in self.tf_tokenizers:
lowerCamelCase_ : List[Any] = ModelToSave(tokenizer=A )
lowerCamelCase_ : List[str] = tf.convert_to_tensor(self.test_sentences )
lowerCamelCase_ : Dict = model(A ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
lowerCamelCase_ : int = Path(A ) / '''saved.model'''
model.save(A )
lowerCamelCase_ : Dict = tf.keras.models.load_model(A )
lowerCamelCase_ : Union[str, Any] = loaded_model(A )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 318
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
| 318
| 1
|
'''simple docstring'''
__lowercase : int = 9.8_06_65
def lowercase_ ( _lowercase , _lowercase , _lowercase = g ) -> float:
'''simple docstring'''
if fluid_density <= 0:
raise ValueError('''Impossible fluid density''' )
if volume < 0:
raise ValueError('''Impossible Object volume''' )
if gravity <= 0:
raise ValueError('''Impossible Gravity''' )
return fluid_density * gravity * volume
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 318
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : int = [''' '''.join(A ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318
| 1
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
__lowercase : Tuple = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
__lowercase : Union[str, Any] = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
__lowercase : List[str] = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def UpperCAmelCase__ (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , reference_urls=[] , )
def UpperCAmelCase__ (self , A , A , A=None , A=False , A=False , A=False , ):
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
lowerCamelCase_ : List[str] = np.array([re.sub(A , '''''' , A ) for x in predictions] )
lowerCamelCase_ : int = np.array([re.sub(A , '''''' , A ) for x in references] )
else:
lowerCamelCase_ : Any = np.asarray(A )
lowerCamelCase_ : Any = np.asarray(A )
if ignore_case:
lowerCamelCase_ : Union[str, Any] = np.char.lower(A )
lowerCamelCase_ : List[Any] = np.char.lower(A )
if ignore_punctuation:
lowerCamelCase_ : Any = string.punctuation.maketrans('''''' , '''''' , string.punctuation )
lowerCamelCase_ : str = np.char.translate(A , table=A )
lowerCamelCase_ : Dict = np.char.translate(A , table=A )
if ignore_numbers:
lowerCamelCase_ : Optional[Any] = string.digits.maketrans('''''' , '''''' , string.digits )
lowerCamelCase_ : List[str] = np.char.translate(A , table=A )
lowerCamelCase_ : Optional[int] = np.char.translate(A , table=A )
lowerCamelCase_ : Optional[Any] = predictions == references
return {"exact_match": np.mean(A ) * 1_0_0}
| 318
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCAmelCase__ (cls , A , **A ):
lowerCamelCase_ : Optional[Any] = kwargs.pop('''config''' , A )
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , A )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_, lowerCamelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A , **A )
lowerCamelCase_ : Tuple = config_dict.get('''feature_extractor_type''' , A )
lowerCamelCase_ : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowerCamelCase_ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A ):
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(A , **A )
# It could be in `config.feature_extractor_type``
lowerCamelCase_ : Union[str, Any] = getattr(A , '''feature_extractor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ : Optional[int] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ : Any = feature_extractor_class_from_name(A )
lowerCamelCase_ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase_ : Optional[Any] = feature_extractor_class is not None or type(A ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ : int = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowerCamelCase_ : Any = get_class_from_dynamic_module(
A , A , **A )
lowerCamelCase_ : List[Any] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(A )]
return feature_extractor_class.from_dict(A , **A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase__ (A , A ):
FEATURE_EXTRACTOR_MAPPING.register(A , A )
| 318
| 1
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
__lowercase : Dict = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowercase_ ( _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
lowerCamelCase_ : Dict = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowercase )
lowerCamelCase_, lowerCamelCase_ : str = XLMProphetNetForConditionalGeneration.from_pretrained(
_lowercase , output_loading_info=_lowercase )
else:
lowerCamelCase_ : Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(_lowercase )
lowerCamelCase_, lowerCamelCase_ : Dict = ProphetNetForConditionalGeneration.from_pretrained(
_lowercase , output_loading_info=_lowercase )
lowerCamelCase_ : Union[str, Any] = ['''key_proj''', '''value_proj''', '''query_proj''']
lowerCamelCase_ : List[str] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
lowerCamelCase_ : Optional[Any] = key.split('''.''' )
if attributes[0] == "lm_head":
lowerCamelCase_ : Union[str, Any] = prophet
lowerCamelCase_ : Optional[int] = prophet_old
else:
lowerCamelCase_ : int = prophet.prophetnet
lowerCamelCase_ : Optional[Any] = prophet_old.model
lowerCamelCase_ : str = False
for attribute in attributes:
if attribute in mapping:
lowerCamelCase_ : Optional[Any] = mapping[attribute]
if not hasattr(_lowercase , _lowercase ) and len(_lowercase ) > 0:
lowerCamelCase_ : int = attribute
elif hasattr(_lowercase , _lowercase ):
lowerCamelCase_ : Optional[int] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
lowerCamelCase_ : int = old_model.weight
logger.info(F"""{attribute} is initialized.""" )
lowerCamelCase_ : Optional[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
lowerCamelCase_ : List[str] = old_model.bias
logger.info(F"""{attribute} is initialized""" )
lowerCamelCase_ : Any = True
break
elif attribute in special_keys and hasattr(_lowercase , '''in_proj_weight''' ):
lowerCamelCase_ : str = old_model.in_proj_weight.shape[0] // 3
lowerCamelCase_ : List[Any] = getattr(_lowercase , _lowercase )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
lowerCamelCase_ : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
lowerCamelCase_ : List[str] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
lowerCamelCase_ : Any = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
lowerCamelCase_ : Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
lowerCamelCase_ : Dict = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
lowerCamelCase_ : int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
lowerCamelCase_ : Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
lowerCamelCase_ : List[str] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
lowerCamelCase_ : List[str] = True
break
if attribute.isdigit():
lowerCamelCase_ : Dict = model[int(_lowercase )]
lowerCamelCase_ : Any = old_model[int(_lowercase )]
else:
lowerCamelCase_ : Dict = getattr(_lowercase , _lowercase )
if old_attribute == "":
lowerCamelCase_ : int = old_model
else:
if not hasattr(_lowercase , _lowercase ):
raise ValueError(F"""{old_model} does not have {old_attribute}""" )
lowerCamelCase_ : Dict = getattr(_lowercase , _lowercase )
if not is_key_init:
raise ValueError(F"""{key} was not correctly initialized!""" )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : Any = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 318
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __lowercase :
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase__ (self ):
if self.train_file is not None:
lowerCamelCase_ : Optional[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowercase :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__(self , A ):
lowerCamelCase_ : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ : str = [feature.pop(A ) for feature in features]
lowerCamelCase_ : Any = len(A )
lowerCamelCase_ : List[Any] = len(features[0]['''input_ids'''] )
lowerCamelCase_ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
lowerCamelCase_ : str = list(chain(*A ) )
lowerCamelCase_ : Any = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase_ : int = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ : Tuple = torch.tensor(A , dtype=torch.intaa )
return batch
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowercase ):
lowerCamelCase_ : Tuple = [[context] * 4 for context in examples[context_name]]
lowerCamelCase_ : List[Any] = examples[question_header_name]
lowerCamelCase_ : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase_ : Optional[Any] = list(chain(*_lowercase ) )
lowerCamelCase_ : List[Any] = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase_ : List[str] = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowercase ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = eval_predictions
lowerCamelCase_ : Any = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
"The RoBERTa Model transformer with early exiting (DeeRoBERTa). " , _lowercase , )
class __lowercase ( _lowercase ):
lowerCamelCase : Union[str, Any] = RobertaConfig
lowerCamelCase : Any = "roberta"
def __init__(self , A ):
super().__init__(A )
lowerCamelCase_ : Any = RobertaEmbeddings(A )
self.init_weights()
@add_start_docstrings(
"RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. " , _lowercase , )
class __lowercase ( _lowercase ):
lowerCamelCase : List[str] = RobertaConfig
lowerCamelCase : Dict = "roberta"
def __init__(self , A ):
super().__init__(A )
lowerCamelCase_ : Optional[int] = config.num_labels
lowerCamelCase_ : Optional[Any] = config.num_hidden_layers
lowerCamelCase_ : Optional[Any] = DeeRobertaModel(A )
lowerCamelCase_ : Tuple = nn.Dropout(config.hidden_dropout_prob )
lowerCamelCase_ : Union[str, Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(A )
def UpperCAmelCase__ (self , A=None , A=None , A=None , A=None , A=None , A=None , A=None , A=-1 , A=False , ):
lowerCamelCase_ : List[str] = self.num_layers
try:
lowerCamelCase_ : Any = self.roberta(
A , attention_mask=A , token_type_ids=A , position_ids=A , head_mask=A , inputs_embeds=A , )
lowerCamelCase_ : List[Any] = outputs[1]
lowerCamelCase_ : List[Any] = self.dropout(A )
lowerCamelCase_ : int = self.classifier(A )
lowerCamelCase_ : Dict = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowerCamelCase_ : Tuple = e.message
lowerCamelCase_ : Union[str, Any] = e.exit_layer
lowerCamelCase_ : Any = outputs[0]
if not self.training:
lowerCamelCase_ : Dict = entropy(A )
lowerCamelCase_ : Dict = []
lowerCamelCase_ : str = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowerCamelCase_ : Any = MSELoss()
lowerCamelCase_ : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase_ : int = CrossEntropyLoss()
lowerCamelCase_ : str = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowerCamelCase_ : Optional[int] = []
for highway_exit in outputs[-1]:
lowerCamelCase_ : Union[str, Any] = highway_exit[0]
if not self.training:
highway_logits_all.append(A )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowerCamelCase_ : List[Any] = MSELoss()
lowerCamelCase_ : str = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowerCamelCase_ : List[str] = CrossEntropyLoss()
lowerCamelCase_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(A )
if train_highway:
lowerCamelCase_ : str = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowerCamelCase_ : Tuple = (loss,) + outputs
if not self.training:
lowerCamelCase_ : Any = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowerCamelCase_ : List[str] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 318
|
'''simple docstring'''
from __future__ import annotations
import time
__lowercase : List[Any] = list[tuple[int, int]]
__lowercase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowercase :
def __init__(self , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = pos_x
lowerCamelCase_ : List[str] = pos_y
lowerCamelCase_ : List[Any] = (pos_y, pos_x)
lowerCamelCase_ : List[str] = goal_x
lowerCamelCase_ : Union[str, Any] = goal_y
lowerCamelCase_ : int = parent
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Union[str, Any] = [self.start]
lowerCamelCase_ : List[str] = False
def UpperCAmelCase__ (self ):
while self.node_queue:
lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase_ : List[str] = True
return self.retrace_path(A )
lowerCamelCase_ : str = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = []
for action in delta:
lowerCamelCase_ : Any = parent.pos_x + action[1]
lowerCamelCase_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = node
lowerCamelCase_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase_ : List[Any] = current_node.parent
path.reverse()
return path
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A )
lowerCamelCase_ : Any = BreadthFirstSearch(A , A )
lowerCamelCase_ : Union[str, Any] = False
def UpperCAmelCase__ (self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCamelCase_ : Optional[Any] = True
return self.retrace_bidirectional_path(
A , A )
lowerCamelCase_ : Optional[int] = current_bwd_node
lowerCamelCase_ : List[str] = current_fwd_node
lowerCamelCase_ : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A )
lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase : List[str] = (0, 0)
__lowercase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase : Tuple = time.time()
__lowercase : int = BreadthFirstSearch(init, goal)
__lowercase : Dict = bfs.search()
__lowercase : Dict = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase : int = time.time()
__lowercase : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
__lowercase : Any = bd_bfs.search()
__lowercase : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 318
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase : str = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = ["input_features", "is_longer"]
def __init__(self , A=6_4 , A=4_8_0_0_0 , A=4_8_0 , A=1_0 , A=1_0_2_4 , A=0.0 , A=False , A = 0 , A = 1_4_0_0_0 , A = None , A = "fusion" , A = "repeatpad" , **A , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
lowerCamelCase_ : Union[str, Any] = top_db
lowerCamelCase_ : Union[str, Any] = truncation
lowerCamelCase_ : Optional[Any] = padding
lowerCamelCase_ : Optional[Any] = fft_window_size
lowerCamelCase_ : Dict = (fft_window_size >> 1) + 1
lowerCamelCase_ : str = hop_length
lowerCamelCase_ : Tuple = max_length_s
lowerCamelCase_ : str = max_length_s * sampling_rate
lowerCamelCase_ : List[str] = sampling_rate
lowerCamelCase_ : Union[str, Any] = frequency_min
lowerCamelCase_ : Optional[Any] = frequency_max
lowerCamelCase_ : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm=A , mel_scale='''htk''' , )
lowerCamelCase_ : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=A , min_frequency=A , max_frequency=A , sampling_rate=A , norm='''slaney''' , mel_scale='''slaney''' , )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = copy.deepcopy(self.__dict__ )
lowerCamelCase_ : Optional[int] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = spectrogram(
A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=A , log_mel='''dB''' , )
return log_mel_spectrogram.T
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCamelCase_ : Any = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCamelCase_ : Union[str, Any] = [0]
# randomly choose index for each part
lowerCamelCase_ : Dict = np.random.choice(ranges[0] )
lowerCamelCase_ : str = np.random.choice(ranges[1] )
lowerCamelCase_ : List[Any] = np.random.choice(ranges[2] )
lowerCamelCase_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
lowerCamelCase_ : Tuple = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCamelCase_ : str = mel[idx_back : idx_back + chunk_frames, :]
lowerCamelCase_ : Any = torch.tensor(mel[None, None, :] )
lowerCamelCase_ : Optional[Any] = torch.nn.functional.interpolate(
A , size=[chunk_frames, 6_4] , mode='''bilinear''' , align_corners=A )
lowerCamelCase_ : Optional[int] = mel_shrink[0][0].numpy()
lowerCamelCase_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCAmelCase__ (self , A , A , A , A ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCamelCase_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCamelCase_ : Optional[Any] = len(A ) - max_length
lowerCamelCase_ : List[Any] = np.random.randint(0 , overflow + 1 )
lowerCamelCase_ : Dict = waveform[idx : idx + max_length]
lowerCamelCase_ : List[str] = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCamelCase_ : Dict = self._np_extract_fbank_features(A , self.mel_filters )
lowerCamelCase_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCamelCase_ : Tuple = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCamelCase_ : Tuple = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCamelCase_ : Tuple = False
else:
lowerCamelCase_ : str = self._random_mel_fusion(A , A , A )
lowerCamelCase_ : List[str] = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
lowerCamelCase_ : str = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCamelCase_ : Union[str, Any] = int(max_length / len(A ) )
lowerCamelCase_ : List[str] = np.stack(np.tile(A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCamelCase_ : str = int(max_length / len(A ) )
lowerCamelCase_ : Tuple = np.stack(np.tile(A , A ) )
lowerCamelCase_ : List[str] = np.pad(A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
lowerCamelCase_ : Tuple = self._np_extract_fbank_features(A , self.mel_filters )
lowerCamelCase_ : Union[str, Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCamelCase_ : List[Any] = self._np_extract_fbank_features(A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__(self , A , A = None , A = None , A = None , A = None , A = None , **A , ):
lowerCamelCase_ : Union[str, Any] = truncation if truncation is not None else self.truncation
lowerCamelCase_ : Union[str, Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCamelCase_ : Dict = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowerCamelCase_ : str = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase_ : List[str] = [np.asarray(A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
lowerCamelCase_ : Dict = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase_ : Union[str, Any] = [np.asarray(A )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCamelCase_ : Union[str, Any] = [
self._get_input_mel(A , max_length if max_length else self.nb_max_samples , A , A )
for waveform in raw_speech
]
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(A )
is_longer.append(A )
if truncation == "fusion" and sum(A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCamelCase_ : Dict = np.random.randint(0 , len(A ) )
lowerCamelCase_ : List[Any] = True
if isinstance(input_mel[0] , A ):
lowerCamelCase_ : Optional[int] = [np.asarray(A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCamelCase_ : int = [[longer] for longer in is_longer]
lowerCamelCase_ : str = {'''input_features''': input_mel, '''is_longer''': is_longer}
lowerCamelCase_ : int = BatchFeature(A )
if return_tensors is not None:
lowerCamelCase_ : Union[str, Any] = input_features.convert_to_tensors(A )
return input_features
| 318
|
'''simple docstring'''
import numpy as np
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowercase ( _lowercase , _lowercase ):
lowerCamelCase : Tuple = 1
@register_to_config
def __init__(self , A = 1_0_0_0 , A = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(A )
# standard deviation of the initial noise distribution
lowerCamelCase_ : int = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowerCamelCase_ : Tuple = 4
# running values
lowerCamelCase_ : Any = []
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = num_inference_steps
lowerCamelCase_ : Optional[Any] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowerCamelCase_ : int = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowerCamelCase_ : Dict = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowerCamelCase_ : List[Any] = torch.sin(steps * math.pi / 2 ) ** 2
lowerCamelCase_ : Optional[Any] = (1.0 - self.betas**2) ** 0.5
lowerCamelCase_ : Union[str, Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowerCamelCase_ : Any = timesteps.to(A )
lowerCamelCase_ : Union[str, Any] = []
def UpperCAmelCase__ (self , A , A , A , A = True , ):
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
lowerCamelCase_ : List[Any] = (self.timesteps == timestep).nonzero().item()
lowerCamelCase_ : Tuple = timestep_index + 1
lowerCamelCase_ : Optional[int] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(A )
if len(self.ets ) == 1:
lowerCamelCase_ : Dict = self.ets[-1]
elif len(self.ets ) == 2:
lowerCamelCase_ : Union[str, Any] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowerCamelCase_ : Tuple = (2_3 * self.ets[-1] - 1_6 * self.ets[-2] + 5 * self.ets[-3]) / 1_2
else:
lowerCamelCase_ : Tuple = (1 / 2_4) * (5_5 * self.ets[-1] - 5_9 * self.ets[-2] + 3_7 * self.ets[-3] - 9 * self.ets[-4])
lowerCamelCase_ : Any = self._get_prev_sample(A , A , A , A )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=A )
def UpperCAmelCase__ (self , A , *A , **A ):
return sample
def UpperCAmelCase__ (self , A , A , A , A ):
lowerCamelCase_ : Tuple = self.alphas[timestep_index]
lowerCamelCase_ : Optional[int] = self.betas[timestep_index]
lowerCamelCase_ : int = self.alphas[prev_timestep_index]
lowerCamelCase_ : Any = self.betas[prev_timestep_index]
lowerCamelCase_ : Union[str, Any] = (sample - sigma * ets) / max(A , 1E-8 )
lowerCamelCase_ : Dict = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__(self ):
return self.config.num_train_timesteps
| 318
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ : Optional[Any] = [144, 192, 240]
lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ : List[str] = [96, 120, 144]
lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ : Any = [64, 80, 96]
lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase_ : Union[str, Any] = 0.05
lowerCamelCase_ : Union[str, Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : Optional[Any] = 512
lowerCamelCase_ : Dict = 16
lowerCamelCase_ : Dict = 21
lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json'''
else:
lowerCamelCase_ : Any = 1_000
lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json'''
lowerCamelCase_ : Optional[Any] = '''huggingface/label-files'''
lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : List[str] = idalabel
lowerCamelCase_ : str = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval()
lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = model(**_lowercase )
lowerCamelCase_ : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : str = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase ) -> list:
'''simple docstring'''
if len(_lowercase ) <= 1:
return lst
lowerCamelCase_ : Optional[int] = 1
while i < len(_lowercase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = lst[i], lst[i - 1]
i -= 1
if i == 0:
lowerCamelCase_ : Any = 1
return lst
if __name__ == "__main__":
__lowercase : Any = input('''Enter numbers separated by a comma:\n''').strip()
__lowercase : str = [int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
| 318
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
__lowercase : List[str] = None
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Optional[int] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : int = {
'''vocab_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_file''': {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/tokenizer.json''',
},
}
__lowercase : List[Any] = {
'''camembert-base''': 512,
}
__lowercase : Optional[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : str = VOCAB_FILES_NAMES
lowerCamelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = ["input_ids", "attention_mask"]
lowerCamelCase : List[str] = CamembertTokenizer
def __init__(self , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=["<s>NOTUSED", "</s>NOTUSED"] , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Dict = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , additional_special_tokens=A , **A , )
lowerCamelCase_ : Tuple = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCamelCase_ : List[str] = [self.cls_token_id]
lowerCamelCase_ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Any = [self.sep_token_id]
lowerCamelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ (self , A , A = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ : int = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 318
| 1
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowercase : List[Any] = logging.get_logger(__name__)
__lowercase : Union[str, Any] = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class __lowercase ( _lowercase ):
lowerCamelCase : Dict = "instructblip_vision_model"
def __init__(self , A=1_4_0_8 , A=6_1_4_4 , A=3_9 , A=1_6 , A=2_2_4 , A=1_4 , A="gelu" , A=1E-6 , A=0.0 , A=1E-10 , A=True , **A , ):
super().__init__(**A )
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : Dict = intermediate_size
lowerCamelCase_ : Any = num_hidden_layers
lowerCamelCase_ : Union[str, Any] = num_attention_heads
lowerCamelCase_ : Any = patch_size
lowerCamelCase_ : Optional[Any] = image_size
lowerCamelCase_ : Tuple = initializer_range
lowerCamelCase_ : List[Any] = attention_dropout
lowerCamelCase_ : List[str] = layer_norm_eps
lowerCamelCase_ : int = hidden_act
lowerCamelCase_ : Dict = qkv_bias
@classmethod
def UpperCAmelCase__ (cls , A , **A ):
cls._set_token_in_kwargs(A )
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
lowerCamelCase_ : List[str] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A , **A )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "instructblip_qformer"
def __init__(self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=0.02 , A=1E-12 , A=0 , A="absolute" , A=2 , A=1_4_0_8 , **A , ):
super().__init__(pad_token_id=A , **A )
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : str = num_hidden_layers
lowerCamelCase_ : Tuple = num_attention_heads
lowerCamelCase_ : str = hidden_act
lowerCamelCase_ : int = intermediate_size
lowerCamelCase_ : Optional[int] = hidden_dropout_prob
lowerCamelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase_ : Optional[Any] = max_position_embeddings
lowerCamelCase_ : str = initializer_range
lowerCamelCase_ : Dict = layer_norm_eps
lowerCamelCase_ : Optional[int] = position_embedding_type
lowerCamelCase_ : int = cross_attention_frequency
lowerCamelCase_ : Union[str, Any] = encoder_hidden_size
@classmethod
def UpperCAmelCase__ (cls , A , **A ):
cls._set_token_in_kwargs(A )
lowerCamelCase_, lowerCamelCase_ : List[str] = cls.get_config_dict(A , **A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get('''model_type''' ) == "instructblip":
lowerCamelCase_ : List[str] = config_dict['''qformer_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(A , **A )
class __lowercase ( _lowercase ):
lowerCamelCase : List[str] = "instructblip"
lowerCamelCase : int = True
def __init__(self , A=None , A=None , A=None , A=3_2 , **A ):
super().__init__(**A )
if vision_config is None:
lowerCamelCase_ : Optional[Any] = {}
logger.info('''vision_config is None. initializing the InstructBlipVisionConfig with default values.''' )
if qformer_config is None:
lowerCamelCase_ : Optional[Any] = {}
logger.info('''qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.''' )
if text_config is None:
lowerCamelCase_ : str = {}
logger.info('''text_config is None. Initializing the text config with default values (`OPTConfig`).''' )
lowerCamelCase_ : str = InstructBlipVisionConfig(**A )
lowerCamelCase_ : Tuple = InstructBlipQFormerConfig(**A )
lowerCamelCase_ : List[str] = text_config['''model_type'''] if '''model_type''' in text_config else '''opt'''
lowerCamelCase_ : Union[str, Any] = CONFIG_MAPPING[text_model_type](**A )
lowerCamelCase_ : int = self.text_config.tie_word_embeddings
lowerCamelCase_ : str = self.text_config.is_encoder_decoder
lowerCamelCase_ : List[Any] = num_query_tokens
lowerCamelCase_ : List[Any] = self.vision_config.hidden_size
lowerCamelCase_ : Any = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
lowerCamelCase_ : Union[str, Any] = 1.0
lowerCamelCase_ : Any = 0.02
@classmethod
def UpperCAmelCase__ (cls , A , A , A , **A , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **A , )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
lowerCamelCase_ : Dict = self.vision_config.to_dict()
lowerCamelCase_ : int = self.qformer_config.to_dict()
lowerCamelCase_ : Optional[int] = self.text_config.to_dict()
lowerCamelCase_ : Any = self.__class__.model_type
return output
| 318
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318
| 1
|
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __lowercase :
def __init__(self , A , A = 1_3 , A = 6_4 , A = 2 , A = 3 , A = 3 , A = True , A = True , A = 1_2_8 , A=[1_6, 3_2, 6_4, 1_2_8] , A = 7 , A = 4 , A = 3_7 , A = "gelu" , A = 0.1 , A = 0.1 , A = 1_0 , A = 0.02 , A = 2 , A = 1 , A = 1_2_8 , A = [2, 2, 2, 2] , A = 2 , A = 2 , ):
lowerCamelCase_ : Optional[Any] = parent
lowerCamelCase_ : Tuple = batch_size
lowerCamelCase_ : int = image_size
lowerCamelCase_ : List[str] = patch_size
lowerCamelCase_ : Tuple = num_channels
lowerCamelCase_ : Tuple = is_training
lowerCamelCase_ : str = use_labels
lowerCamelCase_ : str = hidden_size
lowerCamelCase_ : Tuple = num_hidden_layers
lowerCamelCase_ : Union[str, Any] = num_attention_heads
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : str = hidden_act
lowerCamelCase_ : Optional[int] = hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase_ : Dict = type_sequence_label_size
lowerCamelCase_ : str = initializer_range
lowerCamelCase_ : Dict = encoder_stride
lowerCamelCase_ : List[Any] = num_attention_outputs
lowerCamelCase_ : int = embed_dim
lowerCamelCase_ : int = embed_dim + 1
lowerCamelCase_ : Optional[int] = resolution
lowerCamelCase_ : Dict = depths
lowerCamelCase_ : Any = hidden_sizes
lowerCamelCase_ : Optional[int] = dim
lowerCamelCase_ : Optional[Any] = mlp_expansion_ratio
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase_ : int = None
if self.use_labels:
lowerCamelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ (self ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Dict = TFEfficientFormerModel(config=A )
lowerCamelCase_ : str = model(A , training=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_ : Optional[int] = self.type_sequence_label_size
lowerCamelCase_ : Optional[Any] = TFEfficientFormerForImageClassification(A )
lowerCamelCase_ : Union[str, Any] = model(A , labels=A , training=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCamelCase_ : Optional[int] = 1
lowerCamelCase_ : List[str] = TFEfficientFormerForImageClassification(A )
lowerCamelCase_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCamelCase_ : Optional[Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.prepare_config_and_inputs()
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = config_and_inputs
lowerCamelCase_ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class __lowercase ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : Union[str, Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCamelCase : Tuple = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCamelCase : List[str] = False
lowerCamelCase : List[Any] = False
lowerCamelCase : str = False
lowerCamelCase : List[Any] = False
lowerCamelCase : Tuple = False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = TFEfficientFormerModelTester(self )
lowerCamelCase_ : List[str] = ConfigTester(
self , config_class=A , has_text_modality=A , hidden_size=3_7 )
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
def UpperCAmelCase__ (self ):
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
def UpperCAmelCase__ (self ):
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : Any = model_class(A )
lowerCamelCase_ : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase_ : Dict = [*signature.parameters.keys()]
lowerCamelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , A )
def UpperCAmelCase__ (self ):
def check_hidden_states_output(A , A , A ):
lowerCamelCase_ : Any = model_class(A )
lowerCamelCase_ : List[Any] = model(**self._prepare_for_class(A , A ) , training=A )
lowerCamelCase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCamelCase_ : Any = getattr(
self.model_tester , '''expected_num_hidden_layers''' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A ) , A )
if hasattr(self.model_tester , '''encoder_seq_length''' ):
lowerCamelCase_ : List[str] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , '''chunk_length''' ) and self.model_tester.chunk_length > 1:
lowerCamelCase_ : str = seq_length * self.model_tester.chunk_length
else:
lowerCamelCase_ : Union[str, Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
lowerCamelCase_ : Tuple = outputs.decoder_hidden_states
self.asseretIsInstance(A , (list, tuple) )
self.assertEqual(len(A ) , A )
lowerCamelCase_ : Any = getattr(self.model_tester , '''seq_length''' , A )
lowerCamelCase_ : int = getattr(self.model_tester , '''decoder_seq_length''' , A )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
lowerCamelCase_, lowerCamelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase_ : str = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase_ : Optional[Any] = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ (self , A , A , A=False ):
lowerCamelCase_ : Dict = super()._prepare_for_class(A , A , return_labels=A )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
@slow
def UpperCAmelCase__ (self ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Union[str, Any] = TFEfficientFormerModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : int = True
lowerCamelCase_ : int = getattr(self.model_tester , '''seq_length''' , A )
lowerCamelCase_ : Union[str, Any] = getattr(self.model_tester , '''encoder_seq_length''' , A )
lowerCamelCase_ : str = getattr(self.model_tester , '''key_length''' , A )
lowerCamelCase_ : List[str] = getattr(self.model_tester , '''chunk_length''' , A )
if chunk_length is not None and hasattr(self.model_tester , '''num_hashes''' ):
lowerCamelCase_ : List[str] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
lowerCamelCase_ : Any = True
lowerCamelCase_ : Optional[Any] = False
lowerCamelCase_ : Dict = True
lowerCamelCase_ : Optional[int] = model_class(A )
lowerCamelCase_ : Optional[Any] = model(**self._prepare_for_class(A , A ) , training=A )
lowerCamelCase_ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : Optional[Any] = model_class(A )
lowerCamelCase_ : Optional[int] = model(**self._prepare_for_class(A , A ) , training=A )
lowerCamelCase_ : List[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCAmelCase__ (self ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
lowerCamelCase_, lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
lowerCamelCase_ : str = model_class(A )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
lowerCamelCase_ : Dict = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=A )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
lowerCamelCase_ : Optional[int] = model(A )
self.assertTrue(outputs_dict is not None )
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class __lowercase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ (self ):
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
lowerCamelCase_ : Optional[int] = self.default_image_processor
lowerCamelCase_ : str = prepare_img()
lowerCamelCase_ : List[Any] = image_processor(images=A , return_tensors='''tf''' )
# forward pass
lowerCamelCase_ : Any = model(**A , training=A )
# verify the logits
lowerCamelCase_ : int = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
lowerCamelCase_ : Dict = tf.constant([-0.05_55, 0.48_25, -0.08_52] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
lowerCamelCase_ : Any = self.default_image_processor
lowerCamelCase_ : List[str] = prepare_img()
lowerCamelCase_ : List[Any] = image_processor(images=A , return_tensors='''tf''' )
# forward pass
lowerCamelCase_ : str = model(**A , training=A )
# verify the logits
lowerCamelCase_ : Dict = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A )
lowerCamelCase_ : List[Any] = tf.constant([-0.13_12, 0.43_53, -1.04_99] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
| 318
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318
| 1
|
'''simple docstring'''
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowercase_ ( _lowercase , _lowercase = "cpu" , _lowercase = None ) -> None:
'''simple docstring'''
lowerCamelCase_ : int = torch.load(_lowercase , map_location=_lowercase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowercase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
lowerCamelCase_ : List[Any] = v.half()
if save_path is None: # overwrite src_path
lowerCamelCase_ : Tuple = src_path
torch.save(_lowercase , _lowercase )
if __name__ == "__main__":
fire.Fire(convert)
| 318
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318
| 1
|
'''simple docstring'''
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Dict = {}
def UpperCAmelCase__ (self ):
print(self.vertex )
for i in self.vertex:
print(A , ''' -> ''' , ''' -> '''.join([str(A ) for j in self.vertex[i]] ) )
def UpperCAmelCase__ (self , A , A ):
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(A )
else:
# else make a new vertex
lowerCamelCase_ : int = [to_vertex]
def UpperCAmelCase__ (self ):
# visited array for storing already visited nodes
lowerCamelCase_ : List[Any] = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(A , A )
def UpperCAmelCase__ (self , A , A ):
# mark start vertex as visited
lowerCamelCase_ : Tuple = True
print(A , end=''' ''' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(A , A )
if __name__ == "__main__":
__lowercase : Optional[int] = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 318
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 318
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class __lowercase ( _lowercase ):
lowerCamelCase : Dict = "distilbert"
lowerCamelCase : Optional[Any] = {
"hidden_size": "dim",
"num_attention_heads": "n_heads",
"num_hidden_layers": "n_layers",
}
def __init__(self , A=3_0_5_2_2 , A=5_1_2 , A=False , A=6 , A=1_2 , A=7_6_8 , A=4 * 7_6_8 , A=0.1 , A=0.1 , A="gelu" , A=0.02 , A=0.1 , A=0.2 , A=0 , **A , ):
lowerCamelCase_ : str = vocab_size
lowerCamelCase_ : Tuple = max_position_embeddings
lowerCamelCase_ : Dict = sinusoidal_pos_embds
lowerCamelCase_ : List[str] = n_layers
lowerCamelCase_ : List[Any] = n_heads
lowerCamelCase_ : Tuple = dim
lowerCamelCase_ : int = hidden_dim
lowerCamelCase_ : Union[str, Any] = dropout
lowerCamelCase_ : Optional[Any] = attention_dropout
lowerCamelCase_ : List[str] = activation
lowerCamelCase_ : Optional[int] = initializer_range
lowerCamelCase_ : Union[str, Any] = qa_dropout
lowerCamelCase_ : Union[str, Any] = seq_classif_dropout
super().__init__(**A , pad_token_id=A )
class __lowercase ( _lowercase ):
@property
def UpperCAmelCase__ (self ):
if self.task == "multiple-choice":
lowerCamelCase_ : int = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase_ : Optional[int] = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 318
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = None , _lowercase = False , ) -> tuple[int, float, str]:
'''simple docstring'''
lowerCamelCase_ : Dict = cipher_alphabet or [chr(_lowercase ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
lowerCamelCase_ : List[Any] = {
'''a''': 0.0_84_97,
'''b''': 0.0_14_92,
'''c''': 0.0_22_02,
'''d''': 0.0_42_53,
'''e''': 0.1_11_62,
'''f''': 0.0_22_28,
'''g''': 0.0_20_15,
'''h''': 0.0_60_94,
'''i''': 0.0_75_46,
'''j''': 0.0_01_53,
'''k''': 0.0_12_92,
'''l''': 0.0_40_25,
'''m''': 0.0_24_06,
'''n''': 0.0_67_49,
'''o''': 0.0_75_07,
'''p''': 0.0_19_29,
'''q''': 0.0_00_95,
'''r''': 0.0_75_87,
'''s''': 0.0_63_27,
'''t''': 0.0_93_56,
'''u''': 0.0_27_58,
'''v''': 0.0_09_78,
'''w''': 0.0_25_60,
'''x''': 0.0_01_50,
'''y''': 0.0_19_94,
'''z''': 0.0_00_77,
}
else:
# Custom frequencies dictionary
lowerCamelCase_ : Union[str, Any] = frequencies_dict
if not case_sensitive:
lowerCamelCase_ : str = ciphertext.lower()
# Chi squared statistic values
lowerCamelCase_ : dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(_lowercase ) ):
lowerCamelCase_ : Optional[int] = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
lowerCamelCase_ : Optional[int] = (alphabet_letters.index(letter.lower() ) - shift) % len(
_lowercase )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
lowerCamelCase_ : int = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
lowerCamelCase_ : Union[str, Any] = letter.lower()
if letter in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase_ : str = decrypted_with_shift.lower().count(_lowercase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase_ : Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase_ : str = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the amount of times the letter occurs in the message
lowerCamelCase_ : List[Any] = decrypted_with_shift.count(_lowercase )
# Get the excepcted amount of times the letter should appear based
# on letter frequencies
lowerCamelCase_ : Optional[int] = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
lowerCamelCase_ : List[str] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
lowerCamelCase_ : str = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(_lowercase ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
lowerCamelCase_ : int = min(
_lowercase , key=_lowercase , )
# Get all the data from the most likely cipher (key, decoded message)
(
(
lowerCamelCase_
), (
lowerCamelCase_
),
) : Tuple = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
| 318
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase ) -> list[int]:
'''simple docstring'''
lowerCamelCase_ : List[str] = [0 for i in range(len(_lowercase ) )]
# initialize interval's left pointer and right pointer
lowerCamelCase_, lowerCamelCase_ : int = 0, 0
for i in range(1 , len(_lowercase ) ):
# case when current index is inside the interval
if i <= right_pointer:
lowerCamelCase_ : Union[str, Any] = min(right_pointer - i + 1 , z_result[i - left_pointer] )
lowerCamelCase_ : List[str] = min_edge
while go_next(_lowercase , _lowercase , _lowercase ):
z_result[i] += 1
# if new index's result gives us more right interval,
# we've to update left_pointer and right_pointer
if i + z_result[i] - 1 > right_pointer:
lowerCamelCase_, lowerCamelCase_ : Optional[int] = i, i + z_result[i] - 1
return z_result
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> bool:
'''simple docstring'''
return i + z_result[i] < len(_lowercase ) and s[z_result[i]] == s[i + z_result[i]]
def lowercase_ ( _lowercase , _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : List[Any] = 0
# concatenate 'pattern' and 'input_str' and call z_function
# with concatenated string
lowerCamelCase_ : Union[str, Any] = z_function(pattern + input_str )
for val in z_result:
# if value is greater then length of the pattern string
# that means this index is starting position of substring
# which is equal to pattern string
if val >= len(_lowercase ):
answer += 1
return answer
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
from itertools import permutations
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase_ : int = [7, 11, 13, 17]
for i, test in enumerate(_lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( _lowercase = 10 ) -> int:
'''simple docstring'''
return sum(
int(''''''.join(map(_lowercase , _lowercase ) ) )
for num in permutations(range(_lowercase ) )
if is_substring_divisible(_lowercase ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 318
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowercase : Union[str, Any] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Any = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__lowercase : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 318
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
class __lowercase :
def __init__(self , A ):
lowerCamelCase_ : List[Any] = size
# approximate the overall size of segment tree with given value
lowerCamelCase_ : Union[str, Any] = [0 for i in range(0 , 4 * size )]
# create array to store lazy update
lowerCamelCase_ : Dict = [0 for i in range(0 , 4 * size )]
lowerCamelCase_ : List[Any] = [0 for i in range(0 , 4 * size )] # flag for lazy update
def UpperCAmelCase__ (self , A ):
return idx * 2
def UpperCAmelCase__ (self , A ):
return idx * 2 + 1
def UpperCAmelCase__ (self , A , A , A , A ):
if left_element == right_element:
lowerCamelCase_ : List[str] = a[left_element - 1]
else:
lowerCamelCase_ : Union[str, Any] = (left_element + right_element) // 2
self.build(self.left(A ) , A , A , A )
self.build(self.right(A ) , mid + 1 , A , A )
lowerCamelCase_ : Optional[int] = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
def UpperCAmelCase__ (self , A , A , A , A , A , A ):
if self.flag[idx] is True:
lowerCamelCase_ : Optional[int] = self.lazy[idx]
lowerCamelCase_ : Tuple = False
if left_element != right_element:
lowerCamelCase_ : Dict = self.lazy[idx]
lowerCamelCase_ : List[str] = self.lazy[idx]
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : List[str] = True
if right_element < a or left_element > b:
return True
if left_element >= a and right_element <= b:
lowerCamelCase_ : Union[str, Any] = val
if left_element != right_element:
lowerCamelCase_ : Union[str, Any] = val
lowerCamelCase_ : Any = val
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : Optional[int] = True
return True
lowerCamelCase_ : List[Any] = (left_element + right_element) // 2
self.update(self.left(A ) , A , A , A , A , A )
self.update(self.right(A ) , mid + 1 , A , A , A , A )
lowerCamelCase_ : Optional[Any] = max(
self.segment_tree[self.left(A )] , self.segment_tree[self.right(A )] )
return True
def UpperCAmelCase__ (self , A , A , A , A , A ):
if self.flag[idx] is True:
lowerCamelCase_ : Dict = self.lazy[idx]
lowerCamelCase_ : Tuple = False
if left_element != right_element:
lowerCamelCase_ : int = self.lazy[idx]
lowerCamelCase_ : Optional[int] = self.lazy[idx]
lowerCamelCase_ : Union[str, Any] = True
lowerCamelCase_ : str = True
if right_element < a or left_element > b:
return -math.inf
if left_element >= a and right_element <= b:
return self.segment_tree[idx]
lowerCamelCase_ : Optional[int] = (left_element + right_element) // 2
lowerCamelCase_ : Optional[Any] = self.query(self.left(A ) , A , A , A , A )
lowerCamelCase_ : Any = self.query(self.right(A ) , mid + 1 , A , A , A )
return max(A , A )
def __str__(self ):
return str([self.query(1 , 1 , self.size , A , A ) for i in range(1 , self.size + 1 )] )
if __name__ == "__main__":
__lowercase : str = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
__lowercase : List[str] = 15
__lowercase : List[str] = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
| 318
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 318
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ : str = BertConfig.from_json_file(_lowercase )
print(F"""Building PyTorch model from configuration: {config}""" )
lowerCamelCase_ : Any = BertForPreTraining(_lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(_lowercase , _lowercase , _lowercase )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _lowercase )
if __name__ == "__main__":
__lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowercase : Dict = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 318
|
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318
| 1
|
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _lowercase ):
lowerCamelCase : str = (EulerDiscreteScheduler,)
lowerCamelCase : int = 10
def UpperCAmelCase__ (self , **A ):
lowerCamelCase_ : List[str] = {
'''num_train_timesteps''': 1_1_0_0,
'''beta_start''': 0.00_01,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**A )
return config
def UpperCAmelCase__ (self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A )
def UpperCAmelCase__ (self ):
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02] ):
self.check_over_configs(beta_start=A , beta_end=A )
def UpperCAmelCase__ (self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=A )
def UpperCAmelCase__ (self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : List[Any] = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : List[str] = torch.manual_seed(0 )
lowerCamelCase_ : int = self.dummy_model()
lowerCamelCase_ : int = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : int = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : Tuple = scheduler.scale_model_input(A , A )
lowerCamelCase_ : Dict = model(A , A )
lowerCamelCase_ : Optional[Any] = scheduler.step(A , A , A , generator=A )
lowerCamelCase_ : List[Any] = output.prev_sample
lowerCamelCase_ : Optional[int] = torch.sum(torch.abs(A ) )
lowerCamelCase_ : int = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.scheduler_classes[0]
lowerCamelCase_ : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase_ : Optional[int] = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps )
lowerCamelCase_ : int = torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = self.dummy_model()
lowerCamelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
lowerCamelCase_ : str = sample.to(A )
for i, t in enumerate(scheduler.timesteps ):
lowerCamelCase_ : int = scheduler.scale_model_input(A , A )
lowerCamelCase_ : str = model(A , A )
lowerCamelCase_ : Dict = scheduler.step(A , A , A , generator=A )
lowerCamelCase_ : Union[str, Any] = output.prev_sample
lowerCamelCase_ : Optional[int] = torch.sum(torch.abs(A ) )
lowerCamelCase_ : Optional[Any] = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 0.00_02 ) < 1E-2
assert abs(result_mean.item() - 2.2676E-06 ) < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.scheduler_classes[0]
lowerCamelCase_ : Any = self.get_scheduler_config()
lowerCamelCase_ : Dict = scheduler_class(**A )
scheduler.set_timesteps(self.num_inference_steps , device=A )
lowerCamelCase_ : Union[str, Any] = torch.manual_seed(0 )
lowerCamelCase_ : str = self.dummy_model()
lowerCamelCase_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_ : List[Any] = sample.to(A )
for t in scheduler.timesteps:
lowerCamelCase_ : str = scheduler.scale_model_input(A , A )
lowerCamelCase_ : Tuple = model(A , A )
lowerCamelCase_ : int = scheduler.step(A , A , A , generator=A )
lowerCamelCase_ : List[Any] = output.prev_sample
lowerCamelCase_ : int = torch.sum(torch.abs(A ) )
lowerCamelCase_ : int = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 10.08_07 ) < 1E-2
assert abs(result_mean.item() - 0.01_31 ) < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.scheduler_classes[0]
lowerCamelCase_ : Dict = self.get_scheduler_config()
lowerCamelCase_ : Optional[int] = scheduler_class(**A , use_karras_sigmas=A )
scheduler.set_timesteps(self.num_inference_steps , device=A )
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = self.dummy_model()
lowerCamelCase_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
lowerCamelCase_ : List[str] = sample.to(A )
for t in scheduler.timesteps:
lowerCamelCase_ : Optional[Any] = scheduler.scale_model_input(A , A )
lowerCamelCase_ : str = model(A , A )
lowerCamelCase_ : Dict = scheduler.step(A , A , A , generator=A )
lowerCamelCase_ : Dict = output.prev_sample
lowerCamelCase_ : Optional[int] = torch.sum(torch.abs(A ) )
lowerCamelCase_ : List[str] = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 1_24.52_29_94_99_51_17_19 ) < 1E-2
assert abs(result_mean.item() - 0.1_62_13_93_26_33_39_99_63 ) < 1E-3
| 318
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : str = torch.load(_lowercase , map_location='''cpu''' )
if "model" in sd.keys():
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )['''model''']
# pop unnecessary weights
lowerCamelCase_ : Union[str, Any] = [
'''decoder.version''',
'''decoder.output_projection.weight''',
]
for key in keys_to_delete:
if key in sd:
sd.pop(_lowercase )
lowerCamelCase_ : int = {
'''decoder.project_in_dim.weight''': '''decoder.project_in.weight''',
'''decoder.project_out_dim.weight''': '''decoder.project_out.weight''',
'''decoder.layer_norm.weight''': '''decoder.final_layer_norm.weight''',
'''decoder.layer_norm.bias''': '''decoder.final_layer_norm.bias''',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
lowerCamelCase_ : Any = sd.pop(_lowercase )
lowerCamelCase_ : List[str] = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
lowerCamelCase_ : int = sd[key]
# We split QKV in separate Q,K,V
lowerCamelCase_ : List[str] = key.replace('''.qkv_proj.''' , '''.q_proj.''' )
lowerCamelCase_ : str = key.replace('''.qkv_proj.''' , '''.k_proj.''' )
lowerCamelCase_ : Tuple = key.replace('''.qkv_proj.''' , '''.v_proj.''' )
lowerCamelCase_ : str = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = torch.split(_lowercase , depth // 3 , dim=0 )
lowerCamelCase_ : List[Any] = q
lowerCamelCase_ : Optional[Any] = k
lowerCamelCase_ : int = v
del sd[key]
return sd
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase=None ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = load_checkpoint(_lowercase )
if config is not None:
lowerCamelCase_ : List[str] = OPTConfig.from_pretrained(_lowercase )
else:
lowerCamelCase_ : List[Any] = OPTConfig()
lowerCamelCase_ : int = OPTModel(_lowercase ).half().eval()
model.load_state_dict(_lowercase )
# Check results
Path(_lowercase ).mkdir(exist_ok=_lowercase )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--fairseq_path''',
type=str,
help=(
'''path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:'''
''' https://huggingface.co/models?other=opt_metasq'''
),
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--hf_config''', default=None, type=str, help='''Define HF config.''')
__lowercase : int = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 318
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase ) and number_of_steps > 0
), F"""number_of_steps needs to be positive integer, your input {number_of_steps}"""
if number_of_steps == 1:
return 1
lowerCamelCase_, lowerCamelCase_ : List[Any] = 1, 1
for _ in range(number_of_steps - 1 ):
lowerCamelCase_, lowerCamelCase_ : Any = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
| 318
| 1
|
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowercase : Union[str, Any] = logging.get_logger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
lowerCamelCase : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
lowerCamelCase : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.task_name.lower()
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "train"
lowerCamelCase : Any = "dev"
lowerCamelCase : Optional[int] = "test"
class __lowercase ( _lowercase ):
lowerCamelCase : GlueDataTrainingArguments
lowerCamelCase : str
lowerCamelCase : List[InputFeatures]
def __init__(self , A , A , A = None , A = Split.train , A = None , ):
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , A , )
lowerCamelCase_ : Optional[Any] = args
lowerCamelCase_ : int = glue_processors[args.task_name]()
lowerCamelCase_ : List[str] = glue_output_modes[args.task_name]
if isinstance(A , A ):
try:
lowerCamelCase_ : List[Any] = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
lowerCamelCase_ : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
lowerCamelCase_ : int = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowerCamelCase_, lowerCamelCase_ : Optional[int] = label_list[2], label_list[1]
lowerCamelCase_ : Any = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowerCamelCase_ : Union[str, Any] = cached_features_file + '''.lock'''
with FileLock(A ):
if os.path.exists(A ) and not args.overwrite_cache:
lowerCamelCase_ : Tuple = time.time()
lowerCamelCase_ : int = torch.load(A )
logger.info(
F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(F"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
lowerCamelCase_ : Optional[Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowerCamelCase_ : str = self.processor.get_test_examples(args.data_dir )
else:
lowerCamelCase_ : Tuple = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowerCamelCase_ : List[Any] = examples[:limit_length]
lowerCamelCase_ : Optional[Any] = glue_convert_examples_to_features(
A , A , max_length=args.max_seq_length , label_list=A , output_mode=self.output_mode , )
lowerCamelCase_ : List[str] = time.time()
torch.save(self.features , A )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__(self ):
return len(self.features )
def __getitem__(self , A ):
return self.features[i]
def UpperCAmelCase__ (self ):
return self.label_list
| 318
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : int = [''' '''.join(A ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318
| 1
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
__lowercase : List[Any] = '''docs/source/en/_toctree.yml'''
def lowercase_ ( _lowercase ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Dict = defaultdict(_lowercase )
for doc in model_doc:
counts[doc["local"]] += 1
lowerCamelCase_ : Optional[int] = [key for key, value in counts.items() if value > 1]
lowerCamelCase_ : List[Any] = []
for duplicate_key in duplicates:
lowerCamelCase_ : Union[str, Any] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(_lowercase ) > 1:
raise ValueError(
F"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(_lowercase , key=lambda _lowercase : s["title"].lower() )
def lowercase_ ( _lowercase=False ) -> Tuple:
'''simple docstring'''
with open(_lowercase , encoding='''utf-8''' ) as f:
lowerCamelCase_ : str = yaml.safe_load(f.read() )
# Get to the API doc
lowerCamelCase_ : List[Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
lowerCamelCase_ : Any = content[api_idx]['''sections''']
# Then to the model doc
lowerCamelCase_ : Optional[int] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
lowerCamelCase_ : Tuple = api_doc[model_idx]['''sections''']
lowerCamelCase_ : Optional[Any] = [(idx, section) for idx, section in enumerate(_lowercase ) if '''sections''' in section]
lowerCamelCase_ : List[str] = False
for idx, modality_doc in modalities_docs:
lowerCamelCase_ : str = modality_doc['''sections''']
lowerCamelCase_ : List[str] = clean_model_doc_toc(_lowercase )
if old_modality_doc != new_modality_doc:
lowerCamelCase_ : Tuple = True
if overwrite:
lowerCamelCase_ : Dict = new_modality_doc
if diff:
if overwrite:
lowerCamelCase_ : Optional[Any] = model_doc
lowerCamelCase_ : Any = api_doc
with open(_lowercase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(_lowercase , allow_unicode=_lowercase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__lowercase : List[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 318
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCAmelCase__ (cls , A , **A ):
lowerCamelCase_ : Optional[Any] = kwargs.pop('''config''' , A )
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , A )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_, lowerCamelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A , **A )
lowerCamelCase_ : Tuple = config_dict.get('''feature_extractor_type''' , A )
lowerCamelCase_ : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowerCamelCase_ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A ):
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(A , **A )
# It could be in `config.feature_extractor_type``
lowerCamelCase_ : Union[str, Any] = getattr(A , '''feature_extractor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ : Optional[int] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ : Any = feature_extractor_class_from_name(A )
lowerCamelCase_ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase_ : Optional[Any] = feature_extractor_class is not None or type(A ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ : int = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowerCamelCase_ : Any = get_class_from_dynamic_module(
A , A , **A )
lowerCamelCase_ : List[Any] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(A )]
return feature_extractor_class.from_dict(A , **A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase__ (A , A ):
FEATURE_EXTRACTOR_MAPPING.register(A , A )
| 318
| 1
|
'''simple docstring'''
from collections.abc import Callable
class __lowercase :
def __init__(self , A = None ):
# Stores actual heap items.
lowerCamelCase_ : list = []
# Stores indexes of each item for supporting updates and deletion.
lowerCamelCase_ : dict = {}
# Stores current size of heap.
lowerCamelCase_ : Optional[Any] = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
lowerCamelCase_ : Any = key or (lambda A : x)
def UpperCAmelCase__ (self , A ):
return int((i - 1) / 2 ) if i > 0 else None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : str = int(2 * i + 1 )
return left if 0 < left < self.size else None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = int(2 * i + 2 )
return right if 0 < right < self.size else None
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_, lowerCamelCase_ : Dict = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
lowerCamelCase_, lowerCamelCase_ : List[Any] = self.arr[j], self.arr[i]
def UpperCAmelCase__ (self , A , A ):
return self.arr[i][1] < self.arr[j][1]
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[Any] = self._left(A )
lowerCamelCase_ : Tuple = self._right(A )
lowerCamelCase_ : Optional[int] = i
if left is not None and not self._cmp(A , A ):
lowerCamelCase_ : Tuple = left
if right is not None and not self._cmp(A , A ):
lowerCamelCase_ : Optional[int] = right
return valid_parent
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = self._parent(A )
while parent is not None and not self._cmp(A , A ):
self._swap(A , A )
lowerCamelCase_, lowerCamelCase_ : Dict = parent, self._parent(A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : str = self._get_valid_parent(A )
while valid_parent != index:
self._swap(A , A )
lowerCamelCase_, lowerCamelCase_ : str = valid_parent, self._get_valid_parent(A )
def UpperCAmelCase__ (self , A , A ):
if item not in self.pos_map:
return
lowerCamelCase_ : str = self.pos_map[item]
lowerCamelCase_ : Optional[int] = [item, self.key(A )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(A )
self._heapify_down(A )
def UpperCAmelCase__ (self , A ):
if item not in self.pos_map:
return
lowerCamelCase_ : List[str] = self.pos_map[item]
del self.pos_map[item]
lowerCamelCase_ : List[Any] = self.arr[self.size - 1]
lowerCamelCase_ : Any = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(A )
self._heapify_down(A )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Union[str, Any] = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(A )] )
else:
lowerCamelCase_ : Tuple = [item, self.key(A )]
lowerCamelCase_ : str = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def UpperCAmelCase__ (self ):
return self.arr[0] if self.size else None
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def lowercase_ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __lowercase :
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase__ (self ):
if self.train_file is not None:
lowerCamelCase_ : Optional[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowercase :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__(self , A ):
lowerCamelCase_ : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ : str = [feature.pop(A ) for feature in features]
lowerCamelCase_ : Any = len(A )
lowerCamelCase_ : List[Any] = len(features[0]['''input_ids'''] )
lowerCamelCase_ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
lowerCamelCase_ : str = list(chain(*A ) )
lowerCamelCase_ : Any = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase_ : int = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ : Tuple = torch.tensor(A , dtype=torch.intaa )
return batch
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowercase ):
lowerCamelCase_ : Tuple = [[context] * 4 for context in examples[context_name]]
lowerCamelCase_ : List[Any] = examples[question_header_name]
lowerCamelCase_ : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase_ : Optional[Any] = list(chain(*_lowercase ) )
lowerCamelCase_ : List[Any] = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase_ : List[str] = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowercase ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = eval_predictions
lowerCamelCase_ : Any = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
__lowercase : int = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : str = Github(os.environ['''GITHUB_TOKEN'''] )
lowerCamelCase_ : str = g.get_repo('''huggingface/transformers''' )
lowerCamelCase_ : Optional[Any] = repo.get_issues(state='''open''' )
for issue in open_issues:
lowerCamelCase_ : int = sorted([comment for comment in issue.get_comments()] , key=lambda _lowercase : i.created_at , reverse=_lowercase )
lowerCamelCase_ : Optional[Any] = comments[0] if len(_lowercase ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 318
|
'''simple docstring'''
from __future__ import annotations
import time
__lowercase : List[Any] = list[tuple[int, int]]
__lowercase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowercase :
def __init__(self , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = pos_x
lowerCamelCase_ : List[str] = pos_y
lowerCamelCase_ : List[Any] = (pos_y, pos_x)
lowerCamelCase_ : List[str] = goal_x
lowerCamelCase_ : Union[str, Any] = goal_y
lowerCamelCase_ : int = parent
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Union[str, Any] = [self.start]
lowerCamelCase_ : List[str] = False
def UpperCAmelCase__ (self ):
while self.node_queue:
lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase_ : List[str] = True
return self.retrace_path(A )
lowerCamelCase_ : str = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = []
for action in delta:
lowerCamelCase_ : Any = parent.pos_x + action[1]
lowerCamelCase_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = node
lowerCamelCase_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase_ : List[Any] = current_node.parent
path.reverse()
return path
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A )
lowerCamelCase_ : Any = BreadthFirstSearch(A , A )
lowerCamelCase_ : Union[str, Any] = False
def UpperCAmelCase__ (self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCamelCase_ : Optional[Any] = True
return self.retrace_bidirectional_path(
A , A )
lowerCamelCase_ : Optional[int] = current_bwd_node
lowerCamelCase_ : List[str] = current_fwd_node
lowerCamelCase_ : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A )
lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase : List[str] = (0, 0)
__lowercase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase : Tuple = time.time()
__lowercase : int = BreadthFirstSearch(init, goal)
__lowercase : Dict = bfs.search()
__lowercase : Dict = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase : int = time.time()
__lowercase : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
__lowercase : Any = bd_bfs.search()
__lowercase : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 318
| 1
|
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : List[Any] = filter(lambda _lowercase : p.requires_grad , model.parameters() )
lowerCamelCase_ : Any = sum([np.prod(p.size() ) for p in model_parameters] )
return params
__lowercase : Dict = logging.getLogger(__name__)
def lowercase_ ( _lowercase , _lowercase ) -> int:
'''simple docstring'''
if metric == "rouge2":
lowerCamelCase_ : Optional[Any] = '''{val_avg_rouge2:.4f}-{step_count}'''
elif metric == "bleu":
lowerCamelCase_ : Optional[int] = '''{val_avg_bleu:.4f}-{step_count}'''
elif metric == "em":
lowerCamelCase_ : Tuple = '''{val_avg_em:.4f}-{step_count}'''
elif metric == "loss":
lowerCamelCase_ : Optional[int] = '''{val_avg_loss:.4f}-{step_count}'''
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
''' function.''' )
lowerCamelCase_ : Any = ModelCheckpoint(
dirpath=_lowercase , filename=_lowercase , monitor=F"""val_{metric}""" , mode='''max''' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowercase_ ( _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
return EarlyStopping(
monitor=F"""val_{metric}""" , mode='''min''' if '''loss''' in metric else '''max''' , patience=_lowercase , verbose=_lowercase , )
class __lowercase ( pl.Callback ):
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : int = {F"""lr_group_{i}""": param['''lr'''] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(A )
@rank_zero_only
def UpperCAmelCase__ (self , A , A , A , A=True ):
logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
lowerCamelCase_ : List[Any] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['''log''', '''progress_bar''', '''preds''']} )
# Log results
lowerCamelCase_ : int = Path(pl_module.hparams.output_dir )
if type_path == "test":
lowerCamelCase_ : Optional[int] = od / '''test_results.txt'''
lowerCamelCase_ : Optional[Any] = od / '''test_generations.txt'''
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
lowerCamelCase_ : Optional[int] = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
lowerCamelCase_ : Optional[int] = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
results_file.parent.mkdir(exist_ok=A )
generations_file.parent.mkdir(exist_ok=A )
with open(A , '''a+''' ) as writer:
for key in sorted(A ):
if key in ["log", "progress_bar", "preds"]:
continue
lowerCamelCase_ : Optional[Any] = metrics[key]
if isinstance(A , torch.Tensor ):
lowerCamelCase_ : Dict = val.item()
lowerCamelCase_ : List[Any] = F"""{key}: {val:.6f}\n"""
writer.write(A )
if not save_generations:
return
if "preds" in metrics:
lowerCamelCase_ : Optional[int] = '''\n'''.join(metrics['''preds'''] )
generations_file.open('''w+''' ).write(A )
@rank_zero_only
def UpperCAmelCase__ (self , A , A ):
try:
lowerCamelCase_ : Optional[Any] = pl_module.model.model.num_parameters()
except AttributeError:
lowerCamelCase_ : List[str] = pl_module.model.num_parameters()
lowerCamelCase_ : Optional[int] = count_trainable_parameters(A )
# mp stands for million parameters
trainer.logger.log_metrics({'''n_params''': npars, '''mp''': npars / 1E6, '''grad_mp''': n_trainable_pars / 1E6} )
@rank_zero_only
def UpperCAmelCase__ (self , A , A ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(A , A , '''test''' )
@rank_zero_only
def UpperCAmelCase__ (self , A , A ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 318
|
'''simple docstring'''
import numpy as np
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
__lowercase : List[str] = input('''Enter image url: ''').strip()
print(f'Downloading image from {url} ...')
__lowercase : Any = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
__lowercase : Any = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
__lowercase : List[str] = requests.get(image_url).content
__lowercase : Optional[Any] = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg'
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(f'Done. Image saved to disk as {file_name}.')
| 318
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ : Optional[Any] = [144, 192, 240]
lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ : List[str] = [96, 120, 144]
lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ : Any = [64, 80, 96]
lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase_ : Union[str, Any] = 0.05
lowerCamelCase_ : Union[str, Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : Optional[Any] = 512
lowerCamelCase_ : Dict = 16
lowerCamelCase_ : Dict = 21
lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json'''
else:
lowerCamelCase_ : Any = 1_000
lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json'''
lowerCamelCase_ : Optional[Any] = '''huggingface/label-files'''
lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : List[str] = idalabel
lowerCamelCase_ : str = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval()
lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = model(**_lowercase )
lowerCamelCase_ : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : str = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 318
| 1
|
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class __lowercase :
def __init__(self , A , A , A , A=None , A=None ):
lowerCamelCase_ : List[str] = start
lowerCamelCase_ : Optional[int] = end
lowerCamelCase_ : str = val
lowerCamelCase_ : str = (start + end) // 2
lowerCamelCase_ : Any = left
lowerCamelCase_ : List[str] = right
def __repr__(self ):
return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Any = collection
lowerCamelCase_ : Tuple = function
if self.collection:
lowerCamelCase_ : Dict = self._build_tree(0 , len(A ) - 1 )
def UpperCAmelCase__ (self , A , A ):
self._update_tree(self.root , A , A )
def UpperCAmelCase__ (self , A , A ):
return self._query_range(self.root , A , A )
def UpperCAmelCase__ (self , A , A ):
if start == end:
return SegmentTreeNode(A , A , self.collection[start] )
lowerCamelCase_ : List[Any] = (start + end) // 2
lowerCamelCase_ : List[str] = self._build_tree(A , A )
lowerCamelCase_ : List[str] = self._build_tree(mid + 1 , A )
return SegmentTreeNode(A , A , self.fn(left.val , right.val ) , A , A )
def UpperCAmelCase__ (self , A , A , A ):
if node.start == i and node.end == i:
lowerCamelCase_ : Union[str, Any] = val
return
if i <= node.mid:
self._update_tree(node.left , A , A )
else:
self._update_tree(node.right , A , A )
lowerCamelCase_ : str = self.fn(node.left.val , node.right.val )
def UpperCAmelCase__ (self , A , A , A ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , A , A )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , A , node.mid ) , self._query_range(node.right , node.mid + 1 , A ) , )
else:
# range in right child tree
return self._query_range(node.right , A , A )
def UpperCAmelCase__ (self ):
if self.root is not None:
lowerCamelCase_ : str = Queue()
queue.put(self.root )
while not queue.empty():
lowerCamelCase_ : List[str] = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('''*''' * 50)
__lowercase : List[str] = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 318
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : Optional[int] = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : int = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 318
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 318
| 1
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
__lowercase : List[Any] = '''examples/'''
__lowercase : Optional[int] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
__lowercase : List[Any] = {
'''init''': '''src/transformers/__init__.py''',
'''setup''': '''setup.py''',
}
__lowercase : Optional[Any] = '''README.md'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
with open(_lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ : List[str] = f.read()
lowerCamelCase_, lowerCamelCase_ : Optional[int] = REPLACE_PATTERNS[pattern]
lowerCamelCase_ : Dict = replace.replace('''VERSION''' , _lowercase )
lowerCamelCase_ : int = re_pattern.sub(_lowercase , _lowercase )
with open(_lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.write(_lowercase )
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
for folder, directories, fnames in os.walk(_lowercase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('''research_projects''' )
if "legacy" in directories:
directories.remove('''legacy''' )
for fname in fnames:
if fname.endswith('''.py''' ):
update_version_in_file(os.path.join(_lowercase , _lowercase ) , _lowercase , pattern='''examples''' )
def lowercase_ ( _lowercase , _lowercase=False ) -> str:
'''simple docstring'''
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_lowercase , _lowercase , _lowercase )
if not patch:
update_version_in_examples(_lowercase )
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''🤗 Transformers currently provides the following architectures'''
lowerCamelCase_ : str = '''1. Want to contribute a new model?'''
with open(_lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
lowerCamelCase_ : List[str] = f.readlines()
# Find the start of the list.
lowerCamelCase_ : Any = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
lowerCamelCase_ : Dict = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('''1.''' ):
lowerCamelCase_ : int = lines[index].replace(
'''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , )
index += 1
with open(_lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(_lowercase )
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
with open(REPLACE_FILES['''init'''] , '''r''' ) as f:
lowerCamelCase_ : int = f.read()
lowerCamelCase_ : Union[str, Any] = REPLACE_PATTERNS['''init'''][0].search(_lowercase ).groups()[0]
return packaging.version.parse(_lowercase )
def lowercase_ ( _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' )
if default_version.is_devrelease:
lowerCamelCase_ : Dict = default_version.base_version
elif patch:
lowerCamelCase_ : str = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
lowerCamelCase_ : int = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
lowerCamelCase_ : Union[str, Any] = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_lowercase ) == 0:
lowerCamelCase_ : Optional[Any] = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_lowercase , patch=_lowercase )
if not patch:
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = get_version()
lowerCamelCase_ : Dict = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
lowerCamelCase_ : Union[str, Any] = current_version.base_version
# Check with the user we got that right.
lowerCamelCase_ : Dict = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_lowercase ) == 0:
lowerCamelCase_ : Optional[Any] = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_lowercase )
print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
__lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
__lowercase : str = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 318
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase , _lowercase ) -> int:
'''simple docstring'''
return int((input_a, input_a).count(0 ) == 0 )
def lowercase_ ( ) -> None:
'''simple docstring'''
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 318
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318
| 1
|
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = ""
lowerCamelCase : List[Any] = "hf-legacy" # "hf://"" is reserved for hffs
def __init__(self , A = None , A = None , **A , ):
super().__init__(self , **A )
lowerCamelCase_ : List[str] = repo_info
lowerCamelCase_ : Optional[int] = token
lowerCamelCase_ : int = None
def UpperCAmelCase__ (self ):
if self.dir_cache is None:
lowerCamelCase_ : str = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCamelCase_ : Union[str, Any] = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(A ): {'''name''': str(A ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def UpperCAmelCase__ (self , A , A = "rb" , **A , ):
if not isinstance(self.repo_info , A ):
raise NotImplementedError(F"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
lowerCamelCase_ : Optional[int] = hf_hub_url(self.repo_info.id , A , revision=self.repo_info.sha )
return fsspec.open(
A , mode=A , headers=get_authentication_headers_for_url(A , use_auth_token=self.token ) , client_kwargs={'''trust_env''': True} , ).open()
def UpperCAmelCase__ (self , A , **A ):
self._get_dirs()
lowerCamelCase_ : Dict = self._strip_protocol(A )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(A )
def UpperCAmelCase__ (self , A , A=False , **A ):
self._get_dirs()
lowerCamelCase_ : Any = PurePosixPath(path.strip('''/''' ) )
lowerCamelCase_ : List[Any] = {}
for p, f in self.dir_cache.items():
lowerCamelCase_ : Union[str, Any] = PurePosixPath(p.strip('''/''' ) )
lowerCamelCase_ : Union[str, Any] = p.parent
if root == path:
lowerCamelCase_ : List[str] = f
lowerCamelCase_ : Dict = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
| 318
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class __lowercase :
def __init__(self , A , ):
lowerCamelCase_ : Tuple = parent
lowerCamelCase_ : str = 1_3
lowerCamelCase_ : Tuple = 7
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : Dict = True
lowerCamelCase_ : str = False
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : Optional[Any] = 9_9
lowerCamelCase_ : Tuple = 3_2
lowerCamelCase_ : Optional[Any] = 2
lowerCamelCase_ : List[Any] = 4
lowerCamelCase_ : Dict = 3_7
lowerCamelCase_ : Any = '''gelu'''
lowerCamelCase_ : Any = 0.1
lowerCamelCase_ : int = 0.1
lowerCamelCase_ : List[str] = 5_1_2
lowerCamelCase_ : Tuple = 1_6
lowerCamelCase_ : int = 2
lowerCamelCase_ : Union[str, Any] = 0.02
lowerCamelCase_ : Tuple = 3
lowerCamelCase_ : List[str] = 4
lowerCamelCase_ : int = None
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : List[str] = None
if self.use_input_mask:
lowerCamelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : str = None
lowerCamelCase_ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : Tuple = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ (self , A , A , A , A , A , A ):
lowerCamelCase_ : str = TFDistilBertModel(config=A )
lowerCamelCase_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase_ : Any = model(A )
lowerCamelCase_ : Tuple = [input_ids, input_mask]
lowerCamelCase_ : List[str] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ (self , A , A , A , A , A , A ):
lowerCamelCase_ : Any = TFDistilBertForMaskedLM(config=A )
lowerCamelCase_ : List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase_ : int = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ (self , A , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = TFDistilBertForQuestionAnswering(config=A )
lowerCamelCase_ : Union[str, Any] = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowerCamelCase_ : str = model(A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ (self , A , A , A , A , A , A ):
lowerCamelCase_ : Union[str, Any] = self.num_labels
lowerCamelCase_ : List[Any] = TFDistilBertForSequenceClassification(A )
lowerCamelCase_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase_ : List[str] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ (self , A , A , A , A , A , A ):
lowerCamelCase_ : Tuple = self.num_choices
lowerCamelCase_ : Optional[Any] = TFDistilBertForMultipleChoice(A )
lowerCamelCase_ : Tuple = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ : str = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase_ : Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowerCamelCase_ : Optional[Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ (self , A , A , A , A , A , A ):
lowerCamelCase_ : Any = self.num_labels
lowerCamelCase_ : Union[str, Any] = TFDistilBertForTokenClassification(A )
lowerCamelCase_ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowerCamelCase_ : Optional[Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.prepare_config_and_inputs()
((lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_)) : int = config_and_inputs
lowerCamelCase_ : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __lowercase ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : str = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
lowerCamelCase : str = (
{
"feature-extraction": TFDistilBertModel,
"fill-mask": TFDistilBertForMaskedLM,
"question-answering": TFDistilBertForQuestionAnswering,
"text-classification": TFDistilBertForSequenceClassification,
"token-classification": TFDistilBertForTokenClassification,
"zero-shot": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : List[str] = False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = TFDistilBertModelTester(self )
lowerCamelCase_ : List[str] = ConfigTester(self , config_class=A , dim=3_7 )
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*A )
@slow
def UpperCAmelCase__ (self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowerCamelCase_ : List[Any] = TFDistilBertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_tf
class __lowercase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowerCamelCase_ : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase_ : Dict = model(A )[0]
lowerCamelCase_ : List[Any] = [1, 6, 7_6_8]
self.assertEqual(output.shape , A )
lowerCamelCase_ : Dict = tf.constant(
[
[
[0.19_26_18_85, -0.13_73_29_55, 0.4_11_97_99],
[0.22_15_01_56, -0.07_42_26_61, 0.39_03_72_04],
[0.22_75_60_18, -0.0_89_64_14, 0.3_70_14_67],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A , atol=1E-4 )
| 318
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 318
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def lowercase_ ( _lowercase , _lowercase ) -> float:
'''simple docstring'''
return sum(c * (x**i) for i, c in enumerate(_lowercase ) )
def lowercase_ ( _lowercase , _lowercase ) -> float:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = 0.0
for coeff in reversed(_lowercase ):
lowerCamelCase_ : Any = result * x + coeff
return result
if __name__ == "__main__":
__lowercase : Union[str, Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
__lowercase : Union[str, Any] = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 318
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase : List[str] = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Union[str, Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[str] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[Any] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : Optional[int] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : List[Any] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__lowercase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 318
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 318
| 1
|
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 318
|
'''simple docstring'''
from itertools import permutations
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase_ : int = [7, 11, 13, 17]
for i, test in enumerate(_lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( _lowercase = 10 ) -> int:
'''simple docstring'''
return sum(
int(''''''.join(map(_lowercase , _lowercase ) ) )
for num in permutations(range(_lowercase ) )
if is_substring_divisible(_lowercase ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 318
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : List[Any] = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
lowerCamelCase : int = "timm_backbone"
def __init__(self , A=None , A=3 , A=True , A=True , A=None , **A , ):
super().__init__(**A )
lowerCamelCase_ : int = backbone
lowerCamelCase_ : Optional[int] = num_channels
lowerCamelCase_ : int = features_only
lowerCamelCase_ : str = use_pretrained_backbone
lowerCamelCase_ : int = True
lowerCamelCase_ : List[str] = out_indices if out_indices is not None else (-1,)
| 318
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318
| 1
|
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('''Googling.....''')
__lowercase : Optional[int] = '''https://www.google.com/search?q=''' + ''' '''.join(sys.argv[1:])
__lowercase : Optional[int] = requests.get(url, headers={'''UserAgent''': UserAgent().random})
# res.raise_for_status()
with open('''project1a.html''', '''wb''') as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
__lowercase : Tuple = BeautifulSoup(res.text, '''html.parser''')
__lowercase : Union[str, Any] = list(soup.select('''.eZt8xd'''))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('''href'''))
else:
webbrowser.open(f'https://google.com{link.get("href")}')
| 318
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 318
| 1
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318
|
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 318
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Dict = {
'''SCUT-DLVCLab/lilt-roberta-en-base''': (
'''https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'''
),
}
class __lowercase ( _lowercase ):
lowerCamelCase : List[Any] = "lilt"
def __init__(self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1E-12 , A=0 , A="absolute" , A=None , A=4 , A=1_0_2_4 , **A , ):
super().__init__(pad_token_id=A , **A )
lowerCamelCase_ : Dict = vocab_size
lowerCamelCase_ : List[str] = hidden_size
lowerCamelCase_ : List[str] = num_hidden_layers
lowerCamelCase_ : List[str] = num_attention_heads
lowerCamelCase_ : Optional[Any] = hidden_act
lowerCamelCase_ : List[str] = intermediate_size
lowerCamelCase_ : str = hidden_dropout_prob
lowerCamelCase_ : Union[str, Any] = attention_probs_dropout_prob
lowerCamelCase_ : int = max_position_embeddings
lowerCamelCase_ : Any = type_vocab_size
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Any = layer_norm_eps
lowerCamelCase_ : Optional[int] = position_embedding_type
lowerCamelCase_ : Tuple = classifier_dropout
lowerCamelCase_ : str = channel_shrink_ratio
lowerCamelCase_ : str = max_ad_position_embeddings
| 318
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318
| 1
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 318
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase = 4 ) -> list[list[int]]:
'''simple docstring'''
lowerCamelCase_ : int = abs(_lowercase ) or 4
return [[1 + x + y * row_size for x in range(_lowercase )] for y in range(_lowercase )]
def lowercase_ ( _lowercase ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(transpose(_lowercase ) )
# OR.. transpose(reverse_column(matrix))
def lowercase_ ( _lowercase ) -> list[list[int]]:
'''simple docstring'''
return reverse_row(reverse_column(_lowercase ) )
# OR.. reverse_column(reverse_row(matrix))
def lowercase_ ( _lowercase ) -> list[list[int]]:
'''simple docstring'''
return reverse_column(transpose(_lowercase ) )
# OR.. transpose(reverse_row(matrix))
def lowercase_ ( _lowercase ) -> list[list[int]]:
'''simple docstring'''
lowerCamelCase_ : int = [list(_lowercase ) for x in zip(*_lowercase )]
return matrix
def lowercase_ ( _lowercase ) -> list[list[int]]:
'''simple docstring'''
lowerCamelCase_ : str = matrix[::-1]
return matrix
def lowercase_ ( _lowercase ) -> list[list[int]]:
'''simple docstring'''
lowerCamelCase_ : List[Any] = [x[::-1] for x in matrix]
return matrix
def lowercase_ ( _lowercase ) -> None:
'''simple docstring'''
for i in matrix:
print(*_lowercase )
if __name__ == "__main__":
__lowercase : int = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
__lowercase : str = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
__lowercase : Optional[Any] = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
| 318
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase , _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : List[str] = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowerCamelCase_ : Dict = n - k
# Calculate C(n,k)
for i in range(_lowercase ):
result *= n - i
result //= i + 1
return result
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
return binomial_coefficient(2 * node_count , _lowercase ) // (node_count + 1)
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
if n < 0:
raise ValueError('''factorial() not defined for negative values''' )
lowerCamelCase_ : Dict = 1
for i in range(1 , n + 1 ):
result *= i
return result
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
return catalan_number(_lowercase ) * factorial(_lowercase )
if __name__ == "__main__":
__lowercase : List[Any] = int(input('''Enter the number of nodes: ''').strip() or 0)
if node_count <= 0:
raise ValueError('''We need some nodes to work with.''')
print(
f'Given {node_count} nodes, there are {binary_tree_count(node_count)} '
f'binary trees and {catalan_number(node_count)} binary search trees.'
)
| 318
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
| 318
| 1
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class __lowercase :
def __init__(self , A , A=1_3 , A=7 , A=True , A=True , A=False , A=True , A=9_9 , A=3_2 , A=5 , A=4 , A=3_7 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=1_6 , A=2 , A=0.02 , A=3 , A=4 , A=None , ):
lowerCamelCase_ : Any = parent
lowerCamelCase_ : int = batch_size
lowerCamelCase_ : List[str] = seq_length
lowerCamelCase_ : List[str] = is_training
lowerCamelCase_ : Union[str, Any] = use_input_mask
lowerCamelCase_ : int = use_token_type_ids
lowerCamelCase_ : Any = use_labels
lowerCamelCase_ : Optional[int] = vocab_size
lowerCamelCase_ : Union[str, Any] = hidden_size
lowerCamelCase_ : Union[str, Any] = num_hidden_layers
lowerCamelCase_ : int = num_attention_heads
lowerCamelCase_ : Optional[Any] = intermediate_size
lowerCamelCase_ : Dict = hidden_act
lowerCamelCase_ : int = hidden_dropout_prob
lowerCamelCase_ : str = attention_probs_dropout_prob
lowerCamelCase_ : List[str] = max_position_embeddings
lowerCamelCase_ : str = type_vocab_size
lowerCamelCase_ : Optional[Any] = type_sequence_label_size
lowerCamelCase_ : Optional[Any] = initializer_range
lowerCamelCase_ : str = num_labels
lowerCamelCase_ : Tuple = num_choices
lowerCamelCase_ : Optional[Any] = scope
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : int = None
if self.use_input_mask:
lowerCamelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ : Union[str, Any] = None
if self.use_token_type_ids:
lowerCamelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ : Tuple = None
lowerCamelCase_ : Optional[Any] = None
lowerCamelCase_ : Dict = None
if self.use_labels:
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ (self ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
def UpperCAmelCase__ (self , A , A , A , A , A , A , A ):
lowerCamelCase_ : str = LlamaModel(config=A )
model.to(A )
model.eval()
lowerCamelCase_ : int = model(A , attention_mask=A )
lowerCamelCase_ : List[Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ (self , A , A , A , A , A , A , A , A , A , ):
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : Optional[Any] = LlamaModel(A )
model.to(A )
model.eval()
lowerCamelCase_ : Tuple = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , )
lowerCamelCase_ : Optional[int] = model(
A , attention_mask=A , encoder_hidden_states=A , )
lowerCamelCase_ : str = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ (self , A , A , A , A , A , A , A , A , A , ):
lowerCamelCase_ : Union[str, Any] = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
lowerCamelCase_ : Dict = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ (self , A , A , A , A , A , A , A , A , A , ):
lowerCamelCase_ : str = True
lowerCamelCase_ : List[str] = True
lowerCamelCase_ : Tuple = LlamaForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
lowerCamelCase_ : Union[str, Any] = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , use_cache=A , )
lowerCamelCase_ : Tuple = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCamelCase_ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase_ : Tuple = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCamelCase_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCamelCase_ : List[str] = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCamelCase_ : int = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , output_hidden_states=A , )['''hidden_states'''][0]
lowerCamelCase_ : Tuple = model(
A , attention_mask=A , encoder_hidden_states=A , encoder_attention_mask=A , past_key_values=A , output_hidden_states=A , )['''hidden_states'''][0]
# select random slice
lowerCamelCase_ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCamelCase_ : Dict = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCamelCase_ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.prepare_config_and_inputs()
(
(
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
), (
lowerCamelCase_
),
) : Any = config_and_inputs
lowerCamelCase_ : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : List[str] = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
lowerCamelCase : str = (LlamaForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Optional[int] = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : int = False
lowerCamelCase : Dict = False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = LlamaModelTester(self )
lowerCamelCase_ : str = ConfigTester(self , config_class=A , hidden_size=3_7 )
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ : Optional[Any] = type
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : List[str] = 3
lowerCamelCase_ : Optional[int] = input_dict['''input_ids''']
lowerCamelCase_ : List[str] = input_ids.ne(1 ).to(A )
lowerCamelCase_ : Any = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ : Union[str, Any] = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowerCamelCase_ : str = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : List[str] = 3
lowerCamelCase_ : List[str] = '''single_label_classification'''
lowerCamelCase_ : Dict = input_dict['''input_ids''']
lowerCamelCase_ : Any = input_ids.ne(1 ).to(A )
lowerCamelCase_ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCamelCase_ : int = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowerCamelCase_ : Union[str, Any] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : List[Any] = 3
lowerCamelCase_ : Any = '''multi_label_classification'''
lowerCamelCase_ : Tuple = input_dict['''input_ids''']
lowerCamelCase_ : Union[str, Any] = input_ids.ne(1 ).to(A )
lowerCamelCase_ : Tuple = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCamelCase_ : Dict = LlamaForSequenceClassification(A )
model.to(A )
model.eval()
lowerCamelCase_ : Optional[int] = model(A , attention_mask=A , labels=A )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def UpperCAmelCase__ (self ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_, lowerCamelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : List[Any] = ids_tensor([1, 1_0] , config.vocab_size )
lowerCamelCase_ : Dict = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase_ : Dict = LlamaModel(A )
original_model.to(A )
original_model.eval()
lowerCamelCase_ : Optional[int] = original_model(A ).last_hidden_state
lowerCamelCase_ : Dict = original_model(A ).last_hidden_state
set_seed(4_2 ) # Fixed seed at init time so the two models get the same random weights
lowerCamelCase_ : Tuple = {'''type''': scaling_type, '''factor''': 10.0}
lowerCamelCase_ : Optional[int] = LlamaModel(A )
scaled_model.to(A )
scaled_model.eval()
lowerCamelCase_ : int = scaled_model(A ).last_hidden_state
lowerCamelCase_ : List[Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
@require_torch
class __lowercase ( unittest.TestCase ):
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCamelCase_ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
lowerCamelCase_ : Optional[Any] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
lowerCamelCase_ : List[str] = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCamelCase_ : int = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , A , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCamelCase_ : Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
lowerCamelCase_ : Dict = model(torch.tensor(A ) )
# Expected mean on dim = -1
lowerCamelCase_ : List[Any] = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCamelCase_ : Any = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , A , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Logits are not exactly the same, once we fix the instabalities somehow, will update!''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCamelCase_ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
lowerCamelCase_ : Optional[Any] = model(torch.tensor(A ) )
# Expected mean on dim = -1
lowerCamelCase_ : int = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
lowerCamelCase_ : Optional[Any] = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , A , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'''Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = [1, 3_0_6, 4_6_5_8, 2_7_8, 6_5_9_3, 3_1_0, 2_8_3_4, 3_3_8]
lowerCamelCase_ : Optional[int] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
lowerCamelCase_ : Tuple = model(torch.tensor(A ) )
lowerCamelCase_ : List[str] = torch.tensor(
[[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , A , atol=1E-2 , rtol=1E-2 )
# fmt: off
lowerCamelCase_ : Any = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
# fmt: on
torch.testing.assert_close(out[0, 0, :3_0] , A , atol=1E-5 , rtol=1E-5 )
@unittest.skip('''Model is curently gated''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
lowerCamelCase_ : str = '''Simply put, the theory of relativity states that '''
lowerCamelCase_ : Optional[Any] = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
lowerCamelCase_ : List[str] = tokenizer.encode(A , return_tensors='''pt''' )
lowerCamelCase_ : List[str] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=A )
# greedy generation outputs
lowerCamelCase_ : List[str] = model.generate(A , max_new_tokens=6_4 , top_p=A , temperature=1 , do_sample=A )
lowerCamelCase_ : List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=A )
self.assertEqual(A , A )
| 318
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : int = [''' '''.join(A ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318
| 1
|
'''simple docstring'''
import numpy as np
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCAmelCase__ (cls , A , **A ):
lowerCamelCase_ : Optional[Any] = kwargs.pop('''config''' , A )
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , A )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_, lowerCamelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A , **A )
lowerCamelCase_ : Tuple = config_dict.get('''feature_extractor_type''' , A )
lowerCamelCase_ : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowerCamelCase_ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A ):
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(A , **A )
# It could be in `config.feature_extractor_type``
lowerCamelCase_ : Union[str, Any] = getattr(A , '''feature_extractor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ : Optional[int] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ : Any = feature_extractor_class_from_name(A )
lowerCamelCase_ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase_ : Optional[Any] = feature_extractor_class is not None or type(A ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ : int = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowerCamelCase_ : Any = get_class_from_dynamic_module(
A , A , **A )
lowerCamelCase_ : List[Any] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(A )]
return feature_extractor_class.from_dict(A , **A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase__ (A , A ):
FEATURE_EXTRACTOR_MAPPING.register(A , A )
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
if not nums:
return 0
lowerCamelCase_ : Optional[int] = nums[0]
lowerCamelCase_ : Optional[int] = 0
for num in nums[1:]:
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = (
max_excluding + num,
max(_lowercase , _lowercase ),
)
return max(_lowercase , _lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __lowercase :
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase__ (self ):
if self.train_file is not None:
lowerCamelCase_ : Optional[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowercase :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__(self , A ):
lowerCamelCase_ : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ : str = [feature.pop(A ) for feature in features]
lowerCamelCase_ : Any = len(A )
lowerCamelCase_ : List[Any] = len(features[0]['''input_ids'''] )
lowerCamelCase_ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
lowerCamelCase_ : str = list(chain(*A ) )
lowerCamelCase_ : Any = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase_ : int = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ : Tuple = torch.tensor(A , dtype=torch.intaa )
return batch
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowercase ):
lowerCamelCase_ : Tuple = [[context] * 4 for context in examples[context_name]]
lowerCamelCase_ : List[Any] = examples[question_header_name]
lowerCamelCase_ : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase_ : Optional[Any] = list(chain(*_lowercase ) )
lowerCamelCase_ : List[Any] = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase_ : List[str] = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowercase ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = eval_predictions
lowerCamelCase_ : Any = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = XLMTokenizer
lowerCamelCase : Optional[Any] = False
def UpperCAmelCase__ (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
lowerCamelCase_ : List[str] = dict(zip(A , range(len(A ) ) ) )
lowerCamelCase_ : Dict = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
lowerCamelCase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCamelCase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' ) as fp:
fp.write(json.dumps(A ) )
with open(self.merges_file , '''w''' ) as fp:
fp.write('''\n'''.join(A ) )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Tuple = '''lower newer'''
lowerCamelCase_ : int = '''lower newer'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = XLMTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase_ : int = '''lower'''
lowerCamelCase_ : List[Any] = ['''low''', '''er</w>''']
lowerCamelCase_ : Dict = tokenizer.tokenize(A )
self.assertListEqual(A , A )
lowerCamelCase_ : List[Any] = tokens + ['''<unk>''']
lowerCamelCase_ : int = [1_4, 1_5, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
lowerCamelCase_ : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=A )
lowerCamelCase_ : Any = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A )
lowerCamelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(A )
lowerCamelCase_ : List[str] = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 318
|
'''simple docstring'''
from __future__ import annotations
import time
__lowercase : List[Any] = list[tuple[int, int]]
__lowercase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowercase :
def __init__(self , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = pos_x
lowerCamelCase_ : List[str] = pos_y
lowerCamelCase_ : List[Any] = (pos_y, pos_x)
lowerCamelCase_ : List[str] = goal_x
lowerCamelCase_ : Union[str, Any] = goal_y
lowerCamelCase_ : int = parent
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Union[str, Any] = [self.start]
lowerCamelCase_ : List[str] = False
def UpperCAmelCase__ (self ):
while self.node_queue:
lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase_ : List[str] = True
return self.retrace_path(A )
lowerCamelCase_ : str = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = []
for action in delta:
lowerCamelCase_ : Any = parent.pos_x + action[1]
lowerCamelCase_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = node
lowerCamelCase_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase_ : List[Any] = current_node.parent
path.reverse()
return path
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A )
lowerCamelCase_ : Any = BreadthFirstSearch(A , A )
lowerCamelCase_ : Union[str, Any] = False
def UpperCAmelCase__ (self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCamelCase_ : Optional[Any] = True
return self.retrace_bidirectional_path(
A , A )
lowerCamelCase_ : Optional[int] = current_bwd_node
lowerCamelCase_ : List[str] = current_fwd_node
lowerCamelCase_ : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A )
lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase : List[str] = (0, 0)
__lowercase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase : Tuple = time.time()
__lowercase : int = BreadthFirstSearch(init, goal)
__lowercase : Dict = bfs.search()
__lowercase : Dict = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase : int = time.time()
__lowercase : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
__lowercase : Any = bd_bfs.search()
__lowercase : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 318
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__lowercase : List[Any] = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
lowerCamelCase : List[str] = ["input_features"]
def __init__(self , A=8_0 , A=1_6_0_0_0 , A=1_6_0 , A=3_0 , A=4_0_0 , A=0.0 , A=False , **A , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , return_attention_mask=A , **A , )
lowerCamelCase_ : Dict = n_fft
lowerCamelCase_ : Tuple = hop_length
lowerCamelCase_ : str = chunk_length
lowerCamelCase_ : Optional[Any] = chunk_length * sampling_rate
lowerCamelCase_ : int = self.n_samples // hop_length
lowerCamelCase_ : Dict = sampling_rate
lowerCamelCase_ : Optional[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=A , norm='''slaney''' , mel_scale='''slaney''' , )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Tuple = spectrogram(
A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowerCamelCase_ : str = log_spec[:, :-1]
lowerCamelCase_ : List[str] = np.maximum(A , log_spec.max() - 8.0 )
lowerCamelCase_ : Dict = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def UpperCAmelCase__ (A , A , A = 0.0 ):
if attention_mask is not None:
lowerCamelCase_ : Dict = np.array(A , np.intaa )
lowerCamelCase_ : Dict = []
for vector, length in zip(A , attention_mask.sum(-1 ) ):
lowerCamelCase_ : List[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowerCamelCase_ : Optional[int] = padding_value
normed_input_values.append(A )
else:
lowerCamelCase_ : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__(self , A , A = True , A = None , A = None , A = None , A = "max_length" , A = None , A = None , A = None , **A , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowerCamelCase_ : Optional[Any] = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
lowerCamelCase_ : Optional[int] = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCamelCase_ : List[Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
lowerCamelCase_ : int = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCamelCase_ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCamelCase_ : Optional[Any] = [np.asarray([raw_speech] ).T]
lowerCamelCase_ : str = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowerCamelCase_ : Any = self.pad(
A , padding=A , max_length=max_length if max_length else self.n_samples , truncation=A , pad_to_multiple_of=A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowerCamelCase_ : List[Any] = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowerCamelCase_ : Dict = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowerCamelCase_ : int = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowerCamelCase_ : Any = [self._np_extract_fbank_features(A ) for waveform in input_features[0]]
if isinstance(input_features[0] , A ):
lowerCamelCase_ : List[str] = [np.asarray(A , dtype=np.floataa ) for feature in input_features]
else:
lowerCamelCase_ : Union[str, Any] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowerCamelCase_ : List[Any] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowerCamelCase_ : int = padded_inputs.convert_to_tensors(A )
return padded_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = copy.deepcopy(self.__dict__ )
lowerCamelCase_ : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 318
|
'''simple docstring'''
import numpy as np
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Optional[int] = TransfoXLTokenizer
lowerCamelCase : Any = False
lowerCamelCase : Union[str, Any] = False
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : str = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
lowerCamelCase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
lowerCamelCase_ : Optional[int] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = '''<unk> UNwanted , running'''
lowerCamelCase_ : str = '''<unk> unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A )
lowerCamelCase_ : Dict = tokenizer.tokenize('''<unk> UNwanted , running''' )
self.assertListEqual(A , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [0, 4, 8, 7] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = TransfoXLTokenizer(lower_case=A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = TransfoXLTokenizer(lower_case=A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = TransfoXLTokenizer(lower_case=A )
lowerCamelCase_ : Dict = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
lowerCamelCase_ : List[Any] = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
self.assertListEqual(tokenizer.tokenize(A ) , A )
self.assertEqual(tokenizer.convert_tokens_to_string(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[str] = len(A )
tokenizer.add_tokens(['''new1''', '''new2'''] )
tokenizer.move_added_token('''new1''' , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(A ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 318
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ : Optional[Any] = [144, 192, 240]
lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ : List[str] = [96, 120, 144]
lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ : Any = [64, 80, 96]
lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase_ : Union[str, Any] = 0.05
lowerCamelCase_ : Union[str, Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : Optional[Any] = 512
lowerCamelCase_ : Dict = 16
lowerCamelCase_ : Dict = 21
lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json'''
else:
lowerCamelCase_ : Any = 1_000
lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json'''
lowerCamelCase_ : Optional[Any] = '''huggingface/label-files'''
lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : List[str] = idalabel
lowerCamelCase_ : str = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval()
lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = model(**_lowercase )
lowerCamelCase_ : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : str = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 318
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : Tuple = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mobilenet_v2"
def __init__(self , A=3 , A=2_2_4 , A=1.0 , A=8 , A=8 , A=6 , A=3_2 , A=True , A=True , A="relu6" , A=True , A=0.8 , A=0.02 , A=0.0_01 , A=2_5_5 , **A , ):
super().__init__(**A )
if depth_multiplier <= 0:
raise ValueError('''depth_multiplier must be greater than zero.''' )
lowerCamelCase_ : Optional[int] = num_channels
lowerCamelCase_ : Tuple = image_size
lowerCamelCase_ : Dict = depth_multiplier
lowerCamelCase_ : Optional[Any] = depth_divisible_by
lowerCamelCase_ : Dict = min_depth
lowerCamelCase_ : Optional[Any] = expand_ratio
lowerCamelCase_ : Tuple = output_stride
lowerCamelCase_ : Dict = first_layer_is_expansion
lowerCamelCase_ : Union[str, Any] = finegrained_output
lowerCamelCase_ : Optional[Any] = hidden_act
lowerCamelCase_ : Tuple = tf_padding
lowerCamelCase_ : List[str] = classifier_dropout_prob
lowerCamelCase_ : List[Any] = initializer_range
lowerCamelCase_ : List[Any] = layer_norm_eps
lowerCamelCase_ : Dict = semantic_loss_ignore_index
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = version.parse("1.11" )
@property
def UpperCAmelCase__ (self ):
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
def UpperCAmelCase__ (self ):
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def UpperCAmelCase__ (self ):
return 1E-4
| 318
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
import requests
def lowercase_ ( _lowercase ) -> dict:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = F"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(_lowercase ).json()
def lowercase_ ( _lowercase = 10 ) -> list[dict]:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'''
lowerCamelCase_ : Tuple = requests.get(_lowercase ).json()[:max_stories]
return [get_hackernews_story(_lowercase ) for story_id in story_ids]
def lowercase_ ( _lowercase = 10 ) -> str:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = hackernews_top_stories(_lowercase )
return "\n".join('''* [{title}]({url})'''.format(**_lowercase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 318
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 318
| 1
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowercase : int = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def lowercase_ ( _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
inspect_dataset(_lowercase , _lowercase )
lowerCamelCase_ : int = path + '''.py'''
assert script_name in os.listdir(_lowercase )
assert "__pycache__" not in os.listdir(_lowercase )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def lowercase_ ( _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
inspect_metric(_lowercase , _lowercase )
lowerCamelCase_ : int = path + '''.py'''
assert script_name in os.listdir(_lowercase )
assert "__pycache__" not in os.listdir(_lowercase )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_dataset_config_info(_lowercase , config_name=_lowercase )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(_lowercase ):
get_dataset_config_info(_lowercase , config_name=_lowercase )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def lowercase_ ( _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : List[str] = get_dataset_config_names(_lowercase )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = get_dataset_infos(_lowercase )
assert list(infos.keys() ) == expected_configs
lowerCamelCase_ : Optional[int] = expected_configs[0]
assert expected_config in infos
lowerCamelCase_ : Dict = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = get_dataset_infos(_lowercase )
assert expected_config in infos
lowerCamelCase_ : List[Any] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> int:
'''simple docstring'''
with pytest.raises(_lowercase ):
get_dataset_split_names(_lowercase , config_name=_lowercase )
| 318
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318
| 1
|
'''simple docstring'''
from itertools import product
def lowercase_ ( _lowercase , _lowercase ) -> list[int]:
'''simple docstring'''
lowerCamelCase_ : int = sides_number
lowerCamelCase_ : Tuple = max_face_number * dice_number
lowerCamelCase_ : int = [0] * (max_total + 1)
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : Dict = range(_lowercase , max_face_number + 1 )
for dice_numbers in product(_lowercase , repeat=_lowercase ):
lowerCamelCase_ : Optional[Any] = sum(_lowercase )
totals_frequencies[total] += 1
return totals_frequencies
def lowercase_ ( ) -> float:
'''simple docstring'''
lowerCamelCase_ : Optional[Any] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCamelCase_ : Tuple = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCamelCase_ : List[Any] = 0
lowerCamelCase_ : Tuple = 9
lowerCamelCase_ : Dict = 4 * 9
lowerCamelCase_ : Any = 6
for peter_total in range(_lowercase , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCamelCase_ : str = (4**9) * (6**6)
lowerCamelCase_ : List[str] = peter_wins_count / total_games_number
lowerCamelCase_ : Dict = round(_lowercase , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(f'{solution() = }')
| 318
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318
| 1
|
'''simple docstring'''
__lowercase : Dict = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = [False] * len(_lowercase )
lowerCamelCase_ : Optional[Any] = [s]
lowerCamelCase_ : List[str] = True
while queue:
lowerCamelCase_ : Any = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(_lowercase )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : Union[str, Any] = u
return visited[t]
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : str = [-1] * (len(_lowercase ))
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : List[str] = []
lowerCamelCase_ : int = [i[:] for i in graph] # Record original cut, copy.
while bfs(_lowercase , _lowercase , _lowercase , _lowercase ):
lowerCamelCase_ : str = float('''Inf''' )
lowerCamelCase_ : Union[str, Any] = sink
while s != source:
# Find the minimum value in select path
lowerCamelCase_ : Dict = min(_lowercase , graph[parent[s]][s] )
lowerCamelCase_ : str = parent[s]
max_flow += path_flow
lowerCamelCase_ : Any = sink
while v != source:
lowerCamelCase_ : Dict = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowerCamelCase_ : Dict = parent[v]
for i in range(len(_lowercase ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 318
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 318
| 1
|
'''simple docstring'''
def lowercase_ ( _lowercase = 1_000 ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = -1
lowerCamelCase_ : Optional[int] = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
lowerCamelCase_ : Optional[Any] = (n * n - 2 * a * n) // (2 * n - 2 * a)
lowerCamelCase_ : Tuple = n - a - b
if c * c == (a * a + b * b):
lowerCamelCase_ : Optional[Any] = a * b * c
if candidate >= product:
lowerCamelCase_ : Optional[int] = candidate
return product
if __name__ == "__main__":
print(f'{solution() = }')
| 318
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318
| 1
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __lowercase ( _lowercase , _lowercase ):
@register_to_config
def __init__(self , A = 1_2_8 , A = 2_5_6 , A = 20_00.0 , A = 7_6_8 , A = 1_2 , A = 1_2 , A = 6_4 , A = 2_0_4_8 , A = 0.1 , ):
super().__init__()
lowerCamelCase_ : Optional[int] = nn.Sequential(
nn.Linear(A , d_model * 4 , bias=A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=A ) , nn.SiLU() , )
lowerCamelCase_ : Optional[Any] = nn.Embedding(A , A )
lowerCamelCase_ : Optional[Any] = False
lowerCamelCase_ : List[Any] = nn.Linear(A , A , bias=A )
lowerCamelCase_ : List[Any] = nn.Dropout(p=A )
lowerCamelCase_ : List[str] = nn.ModuleList()
for lyr_num in range(A ):
# FiLM conditional T5 decoder
lowerCamelCase_ : List[Any] = DecoderLayer(d_model=A , d_kv=A , num_heads=A , d_ff=A , dropout_rate=A )
self.decoders.append(A )
lowerCamelCase_ : Any = TaLayerNorm(A )
lowerCamelCase_ : Optional[Any] = nn.Dropout(p=A )
lowerCamelCase_ : Optional[int] = nn.Linear(A , A , bias=A )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Tuple = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def UpperCAmelCase__ (self , A , A , A ):
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCamelCase_ : List[str] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowerCamelCase_ : Any = self.conditioning_emb(A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCamelCase_ : List[str] = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCamelCase_ : Tuple = torch.broadcast_to(
torch.arange(A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowerCamelCase_ : Union[str, Any] = self.position_encoding(A )
lowerCamelCase_ : List[str] = self.continuous_inputs_projection(A )
inputs += position_encodings
lowerCamelCase_ : Dict = self.dropout(A )
# decoder: No padding present.
lowerCamelCase_ : str = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCamelCase_ : Any = [(x, self.encoder_decoder_mask(A , A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCamelCase_ : Tuple = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowerCamelCase_ : Any = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowerCamelCase_ : Union[str, Any] = lyr(
A , conditioning_emb=A , encoder_hidden_states=A , encoder_attention_mask=A , )[0]
lowerCamelCase_ : Dict = self.decoder_norm(A )
lowerCamelCase_ : Any = self.post_dropout(A )
lowerCamelCase_ : Optional[int] = self.spec_out(A )
return spec_out
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A , A , A=1E-6 ):
super().__init__()
lowerCamelCase_ : Tuple = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=A , d_kv=A , num_heads=A , dropout_rate=A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=A , d_kv=A , num_heads=A , dropout_rate=A , layer_norm_epsilon=A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=A , d_ff=A , dropout_rate=A , layer_norm_epsilon=A ) )
def UpperCAmelCase__ (self , A , A=None , A=None , A=None , A=None , A=None , ):
lowerCamelCase_ : Dict = self.layer[0](
A , conditioning_emb=A , attention_mask=A , )
if encoder_hidden_states is not None:
lowerCamelCase_ : Dict = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
lowerCamelCase_ : Dict = self.layer[1](
A , key_value_states=A , attention_mask=A , )
# Apply Film Conditional Feed Forward layer
lowerCamelCase_ : Optional[int] = self.layer[-1](A , A )
return (hidden_states,)
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A ):
super().__init__()
lowerCamelCase_ : Optional[int] = TaLayerNorm(A )
lowerCamelCase_ : List[str] = TaFiLMLayer(in_features=d_model * 4 , out_features=A )
lowerCamelCase_ : str = Attention(query_dim=A , heads=A , dim_head=A , out_bias=A , scale_qk=A )
lowerCamelCase_ : Union[str, Any] = nn.Dropout(A )
def UpperCAmelCase__ (self , A , A=None , A=None , ):
# pre_self_attention_layer_norm
lowerCamelCase_ : int = self.layer_norm(A )
if conditioning_emb is not None:
lowerCamelCase_ : Union[str, Any] = self.FiLMLayer(A , A )
# Self-attention block
lowerCamelCase_ : Optional[int] = self.attention(A )
lowerCamelCase_ : Tuple = hidden_states + self.dropout(A )
return hidden_states
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A , A ):
super().__init__()
lowerCamelCase_ : Union[str, Any] = Attention(query_dim=A , heads=A , dim_head=A , out_bias=A , scale_qk=A )
lowerCamelCase_ : Any = TaLayerNorm(A , eps=A )
lowerCamelCase_ : Dict = nn.Dropout(A )
def UpperCAmelCase__ (self , A , A=None , A=None , ):
lowerCamelCase_ : Tuple = self.layer_norm(A )
lowerCamelCase_ : Optional[Any] = self.attention(
A , encoder_hidden_states=A , attention_mask=attention_mask.squeeze(1 ) , )
lowerCamelCase_ : int = hidden_states + self.dropout(A )
return layer_output
class __lowercase ( nn.Module ):
def __init__(self , A , A , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = TaDenseGatedActDense(d_model=A , d_ff=A , dropout_rate=A )
lowerCamelCase_ : int = TaFiLMLayer(in_features=d_model * 4 , out_features=A )
lowerCamelCase_ : Tuple = TaLayerNorm(A , eps=A )
lowerCamelCase_ : int = nn.Dropout(A )
def UpperCAmelCase__ (self , A , A=None ):
lowerCamelCase_ : List[Any] = self.layer_norm(A )
if conditioning_emb is not None:
lowerCamelCase_ : str = self.film(A , A )
lowerCamelCase_ : Tuple = self.DenseReluDense(A )
lowerCamelCase_ : Tuple = hidden_states + self.dropout(A )
return hidden_states
class __lowercase ( nn.Module ):
def __init__(self , A , A , A ):
super().__init__()
lowerCamelCase_ : Dict = nn.Linear(A , A , bias=A )
lowerCamelCase_ : Optional[int] = nn.Linear(A , A , bias=A )
lowerCamelCase_ : List[Any] = nn.Linear(A , A , bias=A )
lowerCamelCase_ : Tuple = nn.Dropout(A )
lowerCamelCase_ : int = NewGELUActivation()
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = self.act(self.wi_a(A ) )
lowerCamelCase_ : Union[str, Any] = self.wi_a(A )
lowerCamelCase_ : Dict = hidden_gelu * hidden_linear
lowerCamelCase_ : int = self.dropout(A )
lowerCamelCase_ : Optional[int] = self.wo(A )
return hidden_states
class __lowercase ( nn.Module ):
def __init__(self , A , A=1E-6 ):
super().__init__()
lowerCamelCase_ : Union[str, Any] = nn.Parameter(torch.ones(A ) )
lowerCamelCase_ : Union[str, Any] = eps
def UpperCAmelCase__ (self , A ):
# T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
# Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
# w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
# half-precision inputs is done in fp32
lowerCamelCase_ : List[str] = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=A )
lowerCamelCase_ : List[str] = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCamelCase_ : List[Any] = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class __lowercase ( nn.Module ):
def UpperCAmelCase__ (self , A ):
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(A , 3.0 )) ))
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Union[str, Any] = nn.Linear(A , out_features * 2 , bias=A )
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : Any = self.scale_bias(A )
lowerCamelCase_, lowerCamelCase_ : Optional[int] = torch.chunk(A , 2 , -1 )
lowerCamelCase_ : Optional[Any] = x * (1 + scale) + shift
return x
| 318
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = 1
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = (3_2, 3_2)
lowerCamelCase_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Any = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(A )
@property
def UpperCAmelCase__ (self ):
def extract(*A , **A ):
class __lowercase :
def __init__(self ):
lowerCamelCase_ : Any = torch.ones([0] )
def UpperCAmelCase__ (self , A ):
self.pixel_values.to(A )
return self
return Out()
return extract
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[Any] = self.dummy_cond_unet
lowerCamelCase_ : Any = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : Union[str, Any] = self.dummy_vae
lowerCamelCase_ : List[Any] = self.dummy_text_encoder
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Dict = 7_7
lowerCamelCase_ : Union[str, Any] = self.dummy_image.to(A )
lowerCamelCase_ : Union[str, Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : int = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Optional[Any] = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : Optional[Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Optional[Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , )
lowerCamelCase_ : int = output.images
lowerCamelCase_ : Union[str, Any] = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=A , return_dict=A , )[0]
lowerCamelCase_ : List[str] = image[0, -3:, -3:, -1]
lowerCamelCase_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCamelCase_ : str = np.array([0.44_27, 0.37_31, 0.42_49, 0.49_41, 0.45_46, 0.41_48, 0.41_93, 0.46_66, 0.44_99] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.dummy_cond_unet
lowerCamelCase_ : Optional[Any] = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase_ : List[Any] = self.dummy_vae
lowerCamelCase_ : Dict = self.dummy_text_encoder
lowerCamelCase_ : Any = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowerCamelCase_ : Optional[Any] = 7_7
lowerCamelCase_ : str = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase_ : Optional[int] = unet.half()
lowerCamelCase_ : Dict = vae.half()
lowerCamelCase_ : Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase_ : Dict = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase_ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase_ : Any = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = '''A painting of a squirrel eating a burger'''
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : Optional[int] = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type='''np''' , image=A , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase_ : List[str] = init_image.resize((7_6_0, 5_0_4) )
lowerCamelCase_ : List[Any] = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Dict = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : Any = torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
lowerCamelCase_ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCamelCase_ : Union[str, Any] = np.array([0.93_58, 0.93_97, 0.95_99, 0.99_01, 1.00_00, 1.00_00, 0.98_82, 1.00_00, 1.00_00] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowerCamelCase_ : List[str] = init_image.resize((7_6_8, 5_1_2) )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowerCamelCase_ : int = '''BAAI/AltDiffusion'''
lowerCamelCase_ : List[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Tuple = '''A fantasy landscape, trending on artstation'''
lowerCamelCase_ : List[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Dict = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type='''np''' , )
lowerCamelCase_ : List[str] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 318
| 1
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__lowercase : Dict = get_tests_dir('''fixtures''')
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# A mock response for an HTTP head request to emulate server down
lowerCamelCase_ : str = mock.Mock()
lowerCamelCase_ : Union[str, Any] = 5_0_0
lowerCamelCase_ : int = {}
lowerCamelCase_ : Optional[Any] = HTTPError
lowerCamelCase_ : Dict = {}
# Download this model to make sure it's in the cache.
lowerCamelCase_ : str = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=A ) as mock_head:
lowerCamelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' )
# This check we did call the fake head request
mock_head.assert_called()
def UpperCAmelCase__ (self ):
# This test is for deprecated behavior and can be removed in v5
lowerCamelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' )
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Any = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-feature-extractor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(A )
feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A , repo_id='''test-feature-extractor''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(F"""{USER}/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = WavaVecaFeatureExtractor.from_pretrained(A )
feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token )
lowerCamelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
A , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
CustomFeatureExtractor.register_for_auto_class()
lowerCamelCase_ : Dict = CustomFeatureExtractor.from_pretrained(A )
feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , )
lowerCamelCase_ : Tuple = AutoFeatureExtractor.from_pretrained(
F"""{USER}/test-dynamic-feature-extractor""" , trust_remote_code=A )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
| 318
|
'''simple docstring'''
from itertools import permutations
def lowercase_ ( _lowercase ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCamelCase_ : int = [7, 11, 13, 17]
for i, test in enumerate(_lowercase ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowercase_ ( _lowercase = 10 ) -> int:
'''simple docstring'''
return sum(
int(''''''.join(map(_lowercase , _lowercase ) ) )
for num in permutations(range(_lowercase ) )
if is_substring_divisible(_lowercase ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __lowercase ( unittest.TestCase ):
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ : Dict = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowerCamelCase_ : Optional[int] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowerCamelCase_ : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowerCamelCase_ : Tuple = model(A , labels=A ).loss
lowerCamelCase_ : List[str] = -tf.math.reduce_mean(A ).numpy()
lowerCamelCase_ : Union[str, Any] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 318
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = LayoutLMTokenizer
lowerCamelCase : Union[str, Any] = LayoutLMTokenizerFast
lowerCamelCase : Optional[int] = True
lowerCamelCase : int = True
def UpperCAmelCase__ (self ):
super().setUp()
lowerCamelCase_ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def UpperCAmelCase__ (self , **A ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Any = '''UNwant\u00E9d,running'''
lowerCamelCase_ : List[Any] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.tokenizer_class(self.vocab_file )
lowerCamelCase_ : Optional[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [7, 4, 5, 1_0, 8, 9] )
def UpperCAmelCase__ (self ):
pass
| 318
| 1
|
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
__lowercase : List[Any] = [
# replace left string with right string to get the relevant state_dict key (identical state dict to bart)
['''memory_attention''', '''encoder_attn'''],
['''attention''', '''attn'''],
['''/''', '''.'''],
['''.LayerNorm.gamma''', '''_layer_norm.weight'''],
['''.LayerNorm.beta''', '''_layer_norm.bias'''],
['''r.layer_''', '''r.layers.'''],
['''output_proj''', '''out_proj'''],
['''ffn.dense_1.''', '''fc2.'''],
['''ffn.dense.''', '''fc1.'''],
['''ffn_layer_norm''', '''final_layer_norm'''],
['''kernel''', '''weight'''],
['''encoder_layer_norm.''', '''encoder.layer_norm.'''],
['''decoder_layer_norm.''', '''decoder.layer_norm.'''],
['''embeddings.weights''', '''shared.weight'''],
]
def lowercase_ ( _lowercase ) -> Tuple:
'''simple docstring'''
for pegasus_name, hf_name in PATTERNS:
lowerCamelCase_ : List[Any] = k.replace(_lowercase , _lowercase )
return k
def lowercase_ ( _lowercase , _lowercase ) -> PegasusForConditionalGeneration:
'''simple docstring'''
lowerCamelCase_ : Any = DEFAULTS.copy()
cfg_kwargs.update(_lowercase )
lowerCamelCase_ : List[Any] = PegasusConfig(**_lowercase )
lowerCamelCase_ : Optional[int] = PegasusForConditionalGeneration(_lowercase )
lowerCamelCase_ : str = torch_model.model.state_dict()
lowerCamelCase_ : Dict = {}
for k, v in tf_weights.items():
lowerCamelCase_ : Union[str, Any] = rename_state_dict_key(_lowercase )
if new_k not in sd:
raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" )
if "dense" in k or "proj" in new_k:
lowerCamelCase_ : Dict = v.T
lowerCamelCase_ : Optional[int] = torch.tensor(_lowercase , dtype=sd[new_k].dtype )
assert v.shape == sd[new_k].shape, F"""{new_k}, {k}, {v.shape}, {sd[new_k].shape}"""
# make sure embedding.padding_idx is respected
lowerCamelCase_ : Optional[int] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1] )
lowerCamelCase_ : Union[str, Any] = mapping['''shared.weight''']
lowerCamelCase_ : str = mapping['''shared.weight''']
lowerCamelCase_ : Optional[Any] = {k: torch.zeros_like(_lowercase ) for k, v in sd.items() if k.endswith('''bias''' ) and k not in mapping}
mapping.update(**_lowercase )
lowerCamelCase_, lowerCamelCase_ : List[Any] = torch_model.model.load_state_dict(_lowercase , strict=_lowercase )
lowerCamelCase_ : str = [
k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
]
assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], F"""no matches found for the following tf keys {extra}"""
return torch_model
def lowercase_ ( _lowercase="./ckpt/aeslc/model.ckpt-32000" ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = tf.train.list_variables(_lowercase )
lowerCamelCase_ : Dict = {}
lowerCamelCase_ : int = ['''Adafactor''', '''global_step''']
for name, shape in tqdm(_lowercase , desc='''converting tf checkpoint to dict''' ):
lowerCamelCase_ : List[str] = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCamelCase_ : List[str] = tf.train.load_variable(_lowercase , _lowercase )
lowerCamelCase_ : Optional[int] = array
return tf_weights
def lowercase_ ( _lowercase , _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = Path(_lowercase ).parent.name
lowerCamelCase_ : Any = task_specific_params[F"""summarization_{dataset}"""]['''max_position_embeddings''']
lowerCamelCase_ : Tuple = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''' , model_max_length=_lowercase )
assert tok.model_max_length == desired_max_model_length
tok.save_pretrained(_lowercase )
# convert model
lowerCamelCase_ : Union[str, Any] = get_tf_weights_as_numpy(_lowercase )
lowerCamelCase_ : Union[str, Any] = task_specific_params[F"""summarization_{dataset}"""]
if dataset == "large":
lowerCamelCase_ : Union[str, Any] = task_specific_params
lowerCamelCase_ : Tuple = convert_pegasus(_lowercase , _lowercase )
torch_model.save_pretrained(_lowercase )
lowerCamelCase_ : Any = torch_model.state_dict()
sd.pop('''model.decoder.embed_positions.weight''' )
sd.pop('''model.encoder.embed_positions.weight''' )
torch.save(_lowercase , Path(_lowercase ) / '''pytorch_model.bin''' )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__lowercase : Tuple = parser.parse_args()
if args.save_dir is None:
__lowercase : List[Any] = Path(args.tf_ckpt_path).parent.name
__lowercase : Dict = os.path.join('''pegasus''', dataset)
convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 318
|
'''simple docstring'''
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __lowercase ( unittest.TestCase ):
@parameterized.expand([(None,), ('''foo.json''',)] )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : List[str] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = AutoConfig.from_pretrained('''gpt2''' )
lowerCamelCase_ : Dict = GenerationConfig.from_model_config(A )
lowerCamelCase_ : Optional[int] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = GenerationConfig()
lowerCamelCase_ : Dict = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
lowerCamelCase_ : int = copy.deepcopy(A )
lowerCamelCase_ : str = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {'''foo''': '''bar'''} )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = GenerationConfig()
lowerCamelCase_ : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(A )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
lowerCamelCase_ : Tuple = GenerationConfig.from_model_config(A )
assert not hasattr(A , '''foo''' ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
lowerCamelCase_ : Tuple = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
lowerCamelCase_ : List[str] = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : Dict = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ (cls ):
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''test-generation-config''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : List[Any] = GenerationConfig.from_pretrained(F"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
lowerCamelCase_ : Optional[Any] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=A , use_auth_token=self._token )
lowerCamelCase_ : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 318
| 1
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__lowercase : List[str] = logging.get_logger(__name__)
__lowercase : int = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn.grep_linear''': '''encoder.layers.*.attention.gru_rel_pos_linear''',
'''self_attn.relative_attention_bias''': '''encoder.layers.*.attention.rel_attn_embed''',
'''self_attn.grep_a''': '''encoder.layers.*.attention.gru_rel_pos_const''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
__lowercase : List[Any] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> str:
'''simple docstring'''
for attribute in key.split('''.''' ):
lowerCamelCase_ : int = getattr(_lowercase , _lowercase )
if weight_type is not None:
lowerCamelCase_ : Optional[Any] = getattr(_lowercase , _lowercase ).shape
else:
lowerCamelCase_ : List[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowerCamelCase_ : str = value
elif weight_type == "weight_g":
lowerCamelCase_ : str = value
elif weight_type == "weight_v":
lowerCamelCase_ : Dict = value
elif weight_type == "bias":
lowerCamelCase_ : Union[str, Any] = value
else:
lowerCamelCase_ : Dict = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def lowercase_ ( _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : Any = []
lowerCamelCase_ : str = fairseq_model.state_dict()
lowerCamelCase_ : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase_ : Dict = False
if "conv_layers" in name:
load_conv_layer(
_lowercase , _lowercase , _lowercase , _lowercase , hf_model.config.feat_extract_norm == '''group''' , )
lowerCamelCase_ : int = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowerCamelCase_ : str = True
if "*" in mapped_key:
lowerCamelCase_ : Tuple = name.split(_lowercase )[0].split('''.''' )[-2]
lowerCamelCase_ : Tuple = mapped_key.replace('''*''' , _lowercase )
if "weight_g" in name:
lowerCamelCase_ : Dict = '''weight_g'''
elif "weight_v" in name:
lowerCamelCase_ : Optional[int] = '''weight_v'''
elif "bias" in name and "relative_attention_bias" not in name:
lowerCamelCase_ : Tuple = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowerCamelCase_ : Dict = '''weight'''
else:
lowerCamelCase_ : int = None
set_recursively(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
continue
if not is_used:
unused_weights.append(_lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : int = full_name.split('''conv_layers.''' )[-1]
lowerCamelCase_ : Union[str, Any] = name.split('''.''' )
lowerCamelCase_ : Any = int(items[0] )
lowerCamelCase_ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase_ : str = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase_ : List[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase_ : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase_ : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(_lowercase )
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase=None ) -> int:
'''simple docstring'''
lowerCamelCase_ : List[Any] = torch.load(_lowercase )
lowerCamelCase_ : Optional[Any] = WavLMConfigOrig(checkpoint['''cfg'''] )
lowerCamelCase_ : List[Any] = WavLMOrig(_lowercase )
model.load_state_dict(checkpoint['''model'''] )
model.eval()
if config_path is not None:
lowerCamelCase_ : Any = WavLMConfig.from_pretrained(_lowercase )
else:
lowerCamelCase_ : List[Any] = WavLMConfig()
lowerCamelCase_ : Union[str, Any] = WavLMModel(_lowercase )
recursively_load_weights(_lowercase , _lowercase )
hf_wavlm.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__lowercase : List[str] = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 318
|
'''simple docstring'''
import numpy
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Optional[int] = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase_ : Optional[Any] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase_ : Tuple = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase_ : Dict = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase_ : Optional[int] = numpy.zeros(output_array.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase_ : Optional[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase_ : List[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase_ : Optional[int] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def UpperCAmelCase__ (self , A , A , A ):
for iteration in range(1 , iterations + 1 ):
lowerCamelCase_ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase_ : List[str] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Optional[int] = input_arr
lowerCamelCase_ : List[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase_ : Optional[int] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase_ : Union[str, Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_ ( _lowercase ) -> numpy.ndarray:
'''simple docstring'''
return (value) * (1 - (value))
def lowercase_ ( ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase_ : Union[str, Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase_ : Dict = TwoHiddenLayerNeuralNetwork(
input_array=_lowercase , output_array=_lowercase )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=_lowercase , iterations=10 , give_loss=_lowercase )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 318
| 1
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __lowercase :
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase__ (self ):
if self.train_file is not None:
lowerCamelCase_ : Optional[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowercase :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__(self , A ):
lowerCamelCase_ : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ : str = [feature.pop(A ) for feature in features]
lowerCamelCase_ : Any = len(A )
lowerCamelCase_ : List[Any] = len(features[0]['''input_ids'''] )
lowerCamelCase_ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
lowerCamelCase_ : str = list(chain(*A ) )
lowerCamelCase_ : Any = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase_ : int = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ : Tuple = torch.tensor(A , dtype=torch.intaa )
return batch
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowercase ):
lowerCamelCase_ : Tuple = [[context] * 4 for context in examples[context_name]]
lowerCamelCase_ : List[Any] = examples[question_header_name]
lowerCamelCase_ : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase_ : Optional[Any] = list(chain(*_lowercase ) )
lowerCamelCase_ : List[Any] = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase_ : List[str] = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowercase ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = eval_predictions
lowerCamelCase_ : Any = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 318
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase : List[str] = get_tests_dir('''fixtures/test_sentencepiece_no_bos.model''')
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Any = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Union[str, Any] = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : Optional[int] = PegasusTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''</s>'''
lowerCamelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(A ) , 1_1_0_3 )
def UpperCAmelCase__ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_1_0_3 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : str = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowerCamelCase_ : Any = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowerCamelCase_ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowerCamelCase_ : Any = [2, 4_1_3, 6_1_5, 1_1_4, 3, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : List[Any] = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_6_1_0_3
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_0_3
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_0_5
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_0_2_4
lowerCamelCase_ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowerCamelCase_ : Tuple = [4_1_3, 6_1_5, 1_1_4, 2_2_9_1, 1_9_7_1, 1_1_3, 1_6_7_9, 1_0_7_1_0, 1_0_7, 1]
lowerCamelCase_ : str = tokenizer([raw_input_str] , return_tensors=A ).input_ids[0]
self.assertListEqual(A , A )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ['''This is going to be way too long.''' * 1_5_0, '''short example''']
lowerCamelCase_ : int = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : List[Any] = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Dict = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1_0_2_4)
assert batch.attention_mask.shape == (2, 1_0_2_4)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
@slow
def UpperCAmelCase__ (self ):
# fmt: off
lowerCamelCase_ : int = {'''input_ids''': [[3_8_9_7_9, 1_4_3, 1_8_4_8_5, 6_0_6, 1_3_0, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 5_4_1_8_9, 1_1_2_9, 1_1_1, 2_6_6_6_9, 8_7_6_8_6, 1_2_1, 9_1_1_4, 1_4_7_8_7, 1_2_1, 1_3_2_4_9, 1_5_8, 5_9_2, 9_5_6, 1_2_1, 1_4_6_2_1, 3_1_5_7_6, 1_4_3, 6_2_6_1_3, 1_0_8, 9_6_8_8, 9_3_0, 4_3_4_3_0, 1_1_5_6_2, 6_2_6_1_3, 3_0_4, 1_0_8, 1_1_4_4_3, 8_9_7, 1_0_8, 9_3_1_4, 1_7_4_1_5, 6_3_3_9_9, 1_0_8, 1_1_4_4_3, 7_6_1_4, 1_8_3_1_6, 1_1_8, 4_2_8_4, 7_1_4_8, 1_2_4_3_0, 1_4_3, 1_4_0_0, 2_5_7_0_3, 1_5_8, 1_1_1, 4_2_8_4, 7_1_4_8, 1_1_7_7_2, 1_4_3, 2_1_2_9_7, 1_0_6_4, 1_5_8, 1_2_2, 2_0_4, 3_5_0_6, 1_7_5_4, 1_1_3_3, 1_4_7_8_7, 1_5_8_1, 1_1_5, 3_3_2_2_4, 4_4_8_2, 1_1_1, 1_3_5_5, 1_1_0, 2_9_1_7_3, 3_1_7, 5_0_8_3_3, 1_0_8, 2_0_1_4_7, 9_4_6_6_5, 1_1_1, 7_7_1_9_8, 1_0_7, 1], [1_1_0, 6_2_6_1_3, 1_1_7, 6_3_8, 1_1_2, 1_1_3_3, 1_2_1, 2_0_0_9_8, 1_3_5_5, 7_9_0_5_0, 1_3_8_7_2, 1_3_5, 1_5_9_6, 5_3_5_4_1, 1_3_5_2, 1_4_1, 1_3_0_3_9, 5_5_4_2, 1_2_4, 3_0_2, 5_1_8, 1_1_1, 2_6_8, 2_9_5_6, 1_1_5, 1_4_9, 4_4_2_7, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_3_9, 1_2_3_5, 2_7_9_9, 1_8_2_8_9, 1_7_7_8_0, 2_0_4, 1_0_9, 9_4_7_4, 1_2_9_6, 1_0_7, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : str = PegasusTokenizer
lowerCamelCase : Optional[Any] = PegasusTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : str = True
def UpperCAmelCase__ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase_ : str = PegasusTokenizer(A , offset=0 , mask_token_sent=A , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__ (self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCAmelCase__ (self , **A ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , A ):
return ("This is a test", "This is a test")
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowerCamelCase_ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowerCamelCase_ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
lowerCamelCase_ : int = py_tokenizer([raw_input_str] , return_tensors=A , add_special_tokens=A ).input_ids[0]
self.assertListEqual(A , A )
@require_torch
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = ['''This is going to be way too long.''' * 1_0_0_0, '''short example''']
lowerCamelCase_ : str = ['''not super long but more than 5 tokens''', '''tiny''']
lowerCamelCase_ : Tuple = self._large_tokenizer(A , padding=A , truncation=A , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = self._large_tokenizer(
text_target=A , max_length=5 , padding=A , truncation=A , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4_0_9_6)
assert batch.attention_mask.shape == (2, 4_0_9_6)
assert targets["input_ids"].shape == (2, 5)
assert len(A ) == 2 # input_ids, attention_mask.
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowerCamelCase_ : List[str] = self._large_tokenizer(A ).input_ids
self.assertListEqual(
A , [1_8_2, 1_1_7, 1_4_2, 5_8_7, 4_2_1_1, 1_2_0, 1_1_7, 2_6_3, 1_1_2, 8_0_4, 1_0_9, 8_5_6, 2_5_0_1_6, 3_1_3_7, 4_6_4, 1_0_9, 2_6_9_5_5, 3_1_3_7, 1] , )
| 318
| 1
|
'''simple docstring'''
import math
import qiskit
def lowercase_ ( _lowercase = 1 , _lowercase = 1 , _lowercase = 1 ) -> qiskit.result.counts.Counts:
'''simple docstring'''
if (
isinstance(_lowercase , _lowercase )
or isinstance(_lowercase , _lowercase )
or isinstance(_lowercase , _lowercase )
):
raise TypeError('''inputs must be integers.''' )
if (input_a < 0) or (input_a < 0) or (carry_in < 0):
raise ValueError('''inputs must be positive.''' )
if (
(math.floor(_lowercase ) != input_a)
or (math.floor(_lowercase ) != input_a)
or (math.floor(_lowercase ) != carry_in)
):
raise ValueError('''inputs must be exact integers.''' )
if (input_a > 2) or (input_a > 2) or (carry_in > 2):
raise ValueError('''inputs must be less or equal to 2.''' )
# build registers
lowerCamelCase_ : str = qiskit.QuantumRegister(4 , '''qr''' )
lowerCamelCase_ : Optional[Any] = qiskit.ClassicalRegister(2 , '''cr''' )
# list the entries
lowerCamelCase_ : Tuple = [input_a, input_a, carry_in]
lowerCamelCase_ : Optional[int] = qiskit.QuantumCircuit(_lowercase , _lowercase )
for i in range(0 , 3 ):
if entry[i] == 2:
quantum_circuit.h(_lowercase ) # for hadamard entries
elif entry[i] == 1:
quantum_circuit.x(_lowercase ) # for 1 entries
elif entry[i] == 0:
quantum_circuit.i(_lowercase ) # for 0 entries
# build the circuit
quantum_circuit.ccx(0 , 1 , 3 ) # ccx = toffoli gate
quantum_circuit.cx(0 , 1 )
quantum_circuit.ccx(1 , 2 , 3 )
quantum_circuit.cx(1 , 2 )
quantum_circuit.cx(0 , 1 )
quantum_circuit.measure([2, 3] , _lowercase ) # measure the last two qbits
lowerCamelCase_ : List[str] = qiskit.Aer.get_backend('''aer_simulator''' )
lowerCamelCase_ : Any = qiskit.execute(_lowercase , _lowercase , shots=1_000 )
return job.result().get_counts(_lowercase )
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 318
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase : str = Lock()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> List[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
lowerCamelCase_ : Dict = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
lowerCamelCase_ : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
lowerCamelCase_ : str = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
lowerCamelCase_ : Any = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
lowerCamelCase_ : int = []
lowerCamelCase_ : Tuple = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
lowerCamelCase_ : Optional[Any] = temp_rs
lowerCamelCase_ : List[str] = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
lowerCamelCase_ : str = Pipe()
lowerCamelCase_ : Any = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
lowerCamelCase_ : Dict = temp_rs
lowerCamelCase_ : Tuple = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
lowerCamelCase_ : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowercase_ ( ) -> Any:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
lowerCamelCase_ : Optional[int] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.29_89 * r + 0.58_70 * g + 0.11_40 * b
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return (gray > 127) & (gray <= 255)
def lowercase_ ( _lowercase , _lowercase ) -> np.ndarray:
'''simple docstring'''
lowerCamelCase_ : List[Any] = np.zeros_like(_lowercase )
lowerCamelCase_ : Union[str, Any] = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
lowerCamelCase_ : Tuple = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
lowerCamelCase_ : Tuple = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
lowerCamelCase_ : str = int(summation > 0 )
return output
if __name__ == "__main__":
# read original image
__lowercase : Union[str, Any] = Path(__file__).resolve().parent / '''image_data''' / '''lena.jpg'''
__lowercase : Optional[Any] = np.array(Image.open(lena_path))
# kernel to be applied
__lowercase : Optional[int] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__lowercase : Union[str, Any] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__lowercase : Any = Image.fromarray(output).convert('''RGB''')
pil_img.save('''result_dilation.png''')
| 318
|
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
__lowercase : Union[str, Any] = logging.get_logger(__name__)
__lowercase : List[str] = '''Hello, World!'''
__lowercase : Union[str, Any] = '''en_XX'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = Path('''data_bin''' )
lowerCamelCase_ : Dict = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowercase ).parent ) , checkpoint_file=Path(_lowercase ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(_lowercase ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(_lowercase ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
xmod.eval() # disable dropout
print(_lowercase )
lowerCamelCase_ : Dict = xmod.model.encoder.sentence_encoder
lowerCamelCase_ : List[Any] = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
lowerCamelCase_ : Tuple = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our X-MOD config:''' , _lowercase )
lowerCamelCase_ : int = XmodForSequenceClassification(_lowercase ) if classification_head else XmodForMaskedLM(_lowercase )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowerCamelCase_ : Dict = xmod_sent_encoder.embed_tokens.weight
lowerCamelCase_ : str = xmod_sent_encoder.embed_positions.weight
lowerCamelCase_ : Optional[Any] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
lowerCamelCase_ : Union[str, Any] = xmod_sent_encoder.layernorm_embedding.weight
lowerCamelCase_ : Dict = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowerCamelCase_ : List[str] = model.roberta.encoder.layer[i]
lowerCamelCase_ : int = xmod_sent_encoder.layers[i]
# self attention
lowerCamelCase_ : Dict = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('''Dimensions of self-attention weights do not match.''' )
lowerCamelCase_ : List[Any] = xmod_layer.self_attn.q_proj.weight
lowerCamelCase_ : Optional[int] = xmod_layer.self_attn.q_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn.k_proj.weight
lowerCamelCase_ : Tuple = xmod_layer.self_attn.k_proj.bias
lowerCamelCase_ : str = xmod_layer.self_attn.v_proj.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.self_attn.v_proj.bias
# self-attention output
lowerCamelCase_ : Optional[Any] = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
lowerCamelCase_ : List[str] = xmod_layer.self_attn.out_proj.weight
lowerCamelCase_ : int = xmod_layer.self_attn.out_proj.bias
lowerCamelCase_ : Any = xmod_layer.self_attn_layer_norm.weight
lowerCamelCase_ : Dict = xmod_layer.self_attn_layer_norm.bias
# intermediate
lowerCamelCase_ : str = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of intermediate weights do not match.''' )
lowerCamelCase_ : Tuple = xmod_layer.fca.weight
lowerCamelCase_ : str = xmod_layer.fca.bias
# output
lowerCamelCase_ : Union[str, Any] = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
lowerCamelCase_ : Optional[int] = xmod_layer.fca.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.fca.bias
lowerCamelCase_ : Dict = xmod_layer.final_layer_norm.weight
lowerCamelCase_ : Optional[Any] = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
lowerCamelCase_ : Optional[int] = xmod_layer.adapter_layer_norm.weight
lowerCamelCase_ : Tuple = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('''Lists of language adapters do not match.''' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
lowerCamelCase_ : List[str] = bert_output.adapter_modules[lang_code]
lowerCamelCase_ : Optional[Any] = xmod_layer.adapter_modules[lang_code]
lowerCamelCase_ : List[Any] = from_adapter.fca.weight
lowerCamelCase_ : str = from_adapter.fca.bias
lowerCamelCase_ : Union[str, Any] = from_adapter.fca.weight
lowerCamelCase_ : int = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
lowerCamelCase_ : str = xmod_sent_encoder.layer_norm.weight
lowerCamelCase_ : Any = xmod_sent_encoder.layer_norm.bias
if classification_head:
lowerCamelCase_ : Optional[int] = xmod.model.classification_heads['''mnli'''].dense.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''].dense.bias
lowerCamelCase_ : List[Any] = xmod.model.classification_heads['''mnli'''].out_proj.weight
lowerCamelCase_ : str = xmod.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowerCamelCase_ : List[str] = xmod.model.encoder.lm_head.dense.weight
lowerCamelCase_ : Optional[Any] = xmod.model.encoder.lm_head.dense.bias
lowerCamelCase_ : Dict = xmod.model.encoder.lm_head.layer_norm.weight
lowerCamelCase_ : Union[str, Any] = xmod.model.encoder.lm_head.layer_norm.bias
lowerCamelCase_ : List[Any] = xmod.model.encoder.lm_head.weight
lowerCamelCase_ : Any = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowerCamelCase_ : Dict = xmod.encode(_lowercase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowercase )
lowerCamelCase_ : Tuple = model(_lowercase )[0]
if classification_head:
lowerCamelCase_ : Union[str, Any] = xmod.model.classification_heads['''mnli'''](xmod.extract_features(_lowercase ) )
else:
lowerCamelCase_ : Union[str, Any] = xmod.model(_lowercase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
lowerCamelCase_ : Any = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"""max_absolute_diff = {max_absolute_diff}""" ) # ~ 1e-7
lowerCamelCase_ : Optional[int] = torch.allclose(_lowercase , _lowercase , atol=1e-3 )
print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
Path(_lowercase ).mkdir(parents=_lowercase , exist_ok=_lowercase )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : Any = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 318
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase : Tuple = logging.get_logger(__name__)
__lowercase : Tuple = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class __lowercase ( _lowercase ):
lowerCamelCase : Any = "xmod"
def __init__(self , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=5_1_2 , A=2 , A=0.02 , A=1E-12 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=None , A=False , A=2 , A=False , A=True , A=True , A=("en_XX",) , A=None , **A , ):
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[int] = hidden_size
lowerCamelCase_ : Optional[int] = num_hidden_layers
lowerCamelCase_ : List[Any] = num_attention_heads
lowerCamelCase_ : Tuple = hidden_act
lowerCamelCase_ : Any = intermediate_size
lowerCamelCase_ : Dict = hidden_dropout_prob
lowerCamelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCamelCase_ : str = max_position_embeddings
lowerCamelCase_ : List[Any] = type_vocab_size
lowerCamelCase_ : Dict = initializer_range
lowerCamelCase_ : Dict = layer_norm_eps
lowerCamelCase_ : str = position_embedding_type
lowerCamelCase_ : Tuple = use_cache
lowerCamelCase_ : List[Any] = classifier_dropout
lowerCamelCase_ : Dict = pre_norm
lowerCamelCase_ : Union[str, Any] = adapter_reduction_factor
lowerCamelCase_ : Optional[int] = adapter_layer_norm
lowerCamelCase_ : List[str] = adapter_reuse_layer_norm
lowerCamelCase_ : Any = ln_before_adapter
lowerCamelCase_ : Tuple = list(A )
lowerCamelCase_ : int = default_language
class __lowercase ( _lowercase ):
@property
def UpperCAmelCase__ (self ):
if self.task == "multiple-choice":
lowerCamelCase_ : str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowerCamelCase_ : Any = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 318
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : str = logging.get_logger(__name__)
__lowercase : Tuple = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class __lowercase ( _lowercase ):
lowerCamelCase : int = "ctrl"
lowerCamelCase : Optional[int] = ["past_key_values"]
lowerCamelCase : Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ):
lowerCamelCase_ : List[str] = vocab_size
lowerCamelCase_ : Optional[Any] = n_positions
lowerCamelCase_ : List[Any] = n_embd
lowerCamelCase_ : Optional[Any] = n_layer
lowerCamelCase_ : Any = n_head
lowerCamelCase_ : int = dff
lowerCamelCase_ : str = resid_pdrop
lowerCamelCase_ : List[Any] = embd_pdrop
lowerCamelCase_ : List[Any] = layer_norm_epsilon
lowerCamelCase_ : Any = initializer_range
lowerCamelCase_ : Dict = use_cache
super().__init__(**A )
| 318
| 1
|
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__lowercase : List[Any] = '''\
@inproceedings{snover-etal-2006-study,
title = "A Study of Translation Edit Rate with Targeted Human Annotation",
author = "Snover, Matthew and
Dorr, Bonnie and
Schwartz, Rich and
Micciulla, Linnea and
Makhoul, John",
booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
month = aug # " 8-12",
year = "2006",
address = "Cambridge, Massachusetts, USA",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2006.amta-papers.25",
pages = "223--231",
}
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__lowercase : Tuple = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.
The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534
See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''
__lowercase : Union[str, Any] = '''
Produces TER scores alongside the number of edits and reference length.
Args:
predictions (list of str): The system stream (a sequence of segments).
references (list of list of str): A list of one or more reference streams (each a sequence of segments).
normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
Only applies if `normalized = True`. Defaults to `False`.
case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.
Returns:
\'score\' (float): TER score (num_edits / sum_ref_lengths * 100)
\'num_edits\' (int): The cumulative number of edits
\'ref_length\' (float): The cumulative average reference length
Examples:
Example 1:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}
Example 2:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... case_sensitive=True)
>>> print(results)
{\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}
Example 3:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... normalized=True,
... case_sensitive=True)
>>> print(results)
{\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}
Example 4:
>>> predictions = ["does this sentence match??",
... "what about this sentence?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}
Example 5:
>>> predictions = ["does this sentence match??",
... "what about this sentence?",
... "What did the TER metric user say to the developer?"]
>>> references = [["does this sentence match", "does this sentence match!?!"],
... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
... ["Your jokes are...", "...TERrible"]]
>>> ter = datasets.load_metric("ter")
>>> results = ter.compute(predictions=predictions,
... references=references,
... ignore_punct=True,
... case_sensitive=False)
>>> print(results)
{\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def UpperCAmelCase__ (self ):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
def UpperCAmelCase__ (self , A , A , A = False , A = False , A = False , A = False , ):
lowerCamelCase_ : List[Any] = len(references[0] )
if any(len(A ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowerCamelCase_ : Union[str, Any] = [[refs[i] for refs in references] for i in range(A )]
lowerCamelCase_ : Optional[int] = TER(
normalized=A , no_punct=A , asian_support=A , case_sensitive=A , )
lowerCamelCase_ : Union[str, Any] = sb_ter.corpus_score(A , A )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 318
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class __lowercase ( tf.keras.layers.Layer ):
def __init__(self , A , A , A = None , A = None ):
super().__init__()
lowerCamelCase_ : List[Any] = pad_token_id
lowerCamelCase_ : Union[str, Any] = max_length
lowerCamelCase_ : List[Any] = vocab
lowerCamelCase_ : Optional[int] = merges
lowerCamelCase_ : List[str] = BytePairTokenizer(A , A , sequence_length=A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : int = [''' '''.join(A ) for m in tokenizer.bpe_ranks.keys()]
lowerCamelCase_ : Dict = tokenizer.get_vocab()
return cls(A , A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A , *A , **A ):
lowerCamelCase_ : Optional[int] = GPTaTokenizer.from_pretrained(A , *A , **A )
return cls.from_tokenizer(A , *A , **A )
@classmethod
def UpperCAmelCase__ (cls , A ):
return cls(**A )
def UpperCAmelCase__ (self ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : str = self.tf_tokenizer(A )
lowerCamelCase_ : Any = tf.ones_like(A )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCamelCase_ : Tuple = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCamelCase_, lowerCamelCase_ : Tuple = pad_model_inputs(
A , max_seq_length=A , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 318
| 1
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__lowercase : List[Any] = logging.getLogger(__name__)
__lowercase : List[Any] = '''pytorch_model.bin'''
@dataclasses.dataclass
class __lowercase :
lowerCamelCase : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
lowerCamelCase : Optional[str] = dataclasses.field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class __lowercase :
lowerCamelCase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
lowerCamelCase : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
lowerCamelCase : Optional[str] = dataclasses.field(
default=_lowercase , metadata={"help": "A csv or a json file containing the validation data."} )
lowerCamelCase : Optional[str] = dataclasses.field(
default=_lowercase , metadata={"help": "The name of the task to train on."} , )
lowerCamelCase : Optional[List[str]] = dataclasses.field(
default=_lowercase , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class __lowercase :
lowerCamelCase : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
lowerCamelCase : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
lowerCamelCase : Optional[str] = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
lowerCamelCase : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
lowerCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
lowerCamelCase : Optional[bool] = dataclasses.field(
default=_lowercase , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
lowerCamelCase : Optional[bool] = dataclasses.field(
default=_lowercase , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
lowerCamelCase : Optional[bool] = dataclasses.field(
default=_lowercase , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
lowerCamelCase : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
lowerCamelCase : Optional[int] = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
lowerCamelCase : Optional[int] = dataclasses.field(
default=_lowercase , metadata={"help": "Random seed for initialization."} , )
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
lowerCamelCase_ : int = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
lowerCamelCase_ : List[str] = dataset.filter(lambda _lowercase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
lowerCamelCase_ : Dict = int(eval_result * len(_lowercase ) )
print(_lowercase )
lowerCamelCase_ : Optional[int] = dataset.sort('''probability''' , reverse=_lowercase )
lowerCamelCase_ : List[Any] = dataset.select(range(_lowercase ) )
lowerCamelCase_ : Optional[int] = dataset.remove_columns(['''label''', '''probability'''] )
lowerCamelCase_ : int = dataset.rename_column('''prediction''' , '''label''' )
lowerCamelCase_ : List[Any] = dataset.map(lambda _lowercase : {"label": idalabel[example["label"]]} )
lowerCamelCase_ : int = dataset.shuffle(seed=args.seed )
lowerCamelCase_ : List[Any] = os.path.join(_lowercase , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(_lowercase , index=_lowercase )
else:
dataset.to_json(_lowercase )
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase , **_lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Tuple = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
lowerCamelCase_ : str = STModelArguments(model_name_or_path=_lowercase )
lowerCamelCase_ : Dict = STDataArguments(train_file=_lowercase , infer_file=_lowercase )
lowerCamelCase_ : Dict = STTrainingArguments(output_dir=_lowercase )
lowerCamelCase_ : int = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_lowercase ).items():
setattr(_lowercase , _lowercase , _lowercase )
for key, value in kwargs.items():
if hasattr(_lowercase , _lowercase ):
setattr(_lowercase , _lowercase , _lowercase )
# Sanity checks
lowerCamelCase_ : str = {}
lowerCamelCase_ : Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
lowerCamelCase_ : Dict = args.train_file
lowerCamelCase_ : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
lowerCamelCase_ : str = args.eval_file
for key in data_files:
lowerCamelCase_ : Union[str, Any] = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
lowerCamelCase_ : Optional[int] = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
lowerCamelCase_ : List[str] = F"""{args.output_dir}/self-train_iter-{{}}""".format
lowerCamelCase_ : str = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
accelerator.wait_for_everyone()
lowerCamelCase_ : str = None
lowerCamelCase_ : int = None
lowerCamelCase_ : Any = 0
lowerCamelCase_ : str = False
# Show the progress bar
lowerCamelCase_ : Union[str, Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
lowerCamelCase_ : Tuple = data_dir_format(_lowercase )
assert os.path.exists(_lowercase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
lowerCamelCase_ : Dict = os.path.join(_lowercase , '''stage-1''' )
lowerCamelCase_ : Dict = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_lowercase , _lowercase ):
arguments_dict.update({key: value} )
lowerCamelCase_ : Optional[Any] = os.path.join(_lowercase , '''best-checkpoint''' , _lowercase )
if os.path.exists(_lowercase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , _lowercase , _lowercase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , _lowercase )
finetune(**_lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowercase )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , _lowercase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
lowerCamelCase_ : Optional[int] = os.path.join(_lowercase , '''best-checkpoint''' )
lowerCamelCase_ : Optional[Any] = os.path.join(_lowercase , '''stage-2''' )
# Update arguments_dict
lowerCamelCase_ : int = model_path
lowerCamelCase_ : List[str] = data_files['''train''']
lowerCamelCase_ : List[Any] = current_output_dir
lowerCamelCase_ : Optional[Any] = os.path.join(_lowercase , '''best-checkpoint''' , _lowercase )
if os.path.exists(_lowercase ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , _lowercase , _lowercase , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , _lowercase )
finetune(**_lowercase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowercase )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , _lowercase )
lowerCamelCase_ : Tuple = iteration
lowerCamelCase_ : List[str] = data_dir_format(iteration + 1 )
lowerCamelCase_ : Optional[int] = AutoConfig.from_pretrained(os.path.join(_lowercase , '''best-checkpoint''' ) )
lowerCamelCase_ : Any = config.idalabel
lowerCamelCase_ : Dict = os.path.join(_lowercase , '''eval_results_best-checkpoint.json''' )
lowerCamelCase_ : str = os.path.join(_lowercase , '''test_results_best-checkpoint.json''' )
assert os.path.exists(_lowercase )
with open(_lowercase , '''r''' ) as f:
lowerCamelCase_ : Optional[Any] = float(json.load(_lowercase )[args.eval_metric] )
lowerCamelCase_ : Any = os.path.join(_lowercase , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(_lowercase )
# Loading the dataset from local csv or json files.
lowerCamelCase_ : Union[str, Any] = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
lowerCamelCase_ : int = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(_lowercase , exist_ok=_lowercase )
shutil.copy(_lowercase , os.path.join(_lowercase , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(_lowercase ):
shutil.copy(_lowercase , os.path.join(_lowercase , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
accelerator.wait_for_everyone()
lowerCamelCase_ : Any = os.path.join(_lowercase , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
lowerCamelCase_ : List[str] = eval_result
if best_iteration is None:
lowerCamelCase_ : Union[str, Any] = new_iteration
lowerCamelCase_ : str = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
lowerCamelCase_ : Dict = new_iteration
lowerCamelCase_ : Any = new_eval_result
lowerCamelCase_ : Tuple = 0
else:
if new_eval_result == best_eval_result:
lowerCamelCase_ : List[str] = new_iteration
lowerCamelCase_ : Optional[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
lowerCamelCase_ : int = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , _lowercase )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowercase , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(_lowercase , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , _lowercase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowercase , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(_lowercase , '''eval_results_best-iteration.json''' ) , )
| 318
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = OrderedDict(
[
('''audio-spectrogram-transformer''', '''ASTFeatureExtractor'''),
('''beit''', '''BeitFeatureExtractor'''),
('''chinese_clip''', '''ChineseCLIPFeatureExtractor'''),
('''clap''', '''ClapFeatureExtractor'''),
('''clip''', '''CLIPFeatureExtractor'''),
('''clipseg''', '''ViTFeatureExtractor'''),
('''conditional_detr''', '''ConditionalDetrFeatureExtractor'''),
('''convnext''', '''ConvNextFeatureExtractor'''),
('''cvt''', '''ConvNextFeatureExtractor'''),
('''data2vec-audio''', '''Wav2Vec2FeatureExtractor'''),
('''data2vec-vision''', '''BeitFeatureExtractor'''),
('''deformable_detr''', '''DeformableDetrFeatureExtractor'''),
('''deit''', '''DeiTFeatureExtractor'''),
('''detr''', '''DetrFeatureExtractor'''),
('''dinat''', '''ViTFeatureExtractor'''),
('''donut-swin''', '''DonutFeatureExtractor'''),
('''dpt''', '''DPTFeatureExtractor'''),
('''encodec''', '''EncodecFeatureExtractor'''),
('''flava''', '''FlavaFeatureExtractor'''),
('''glpn''', '''GLPNFeatureExtractor'''),
('''groupvit''', '''CLIPFeatureExtractor'''),
('''hubert''', '''Wav2Vec2FeatureExtractor'''),
('''imagegpt''', '''ImageGPTFeatureExtractor'''),
('''layoutlmv2''', '''LayoutLMv2FeatureExtractor'''),
('''layoutlmv3''', '''LayoutLMv3FeatureExtractor'''),
('''levit''', '''LevitFeatureExtractor'''),
('''maskformer''', '''MaskFormerFeatureExtractor'''),
('''mctct''', '''MCTCTFeatureExtractor'''),
('''mobilenet_v1''', '''MobileNetV1FeatureExtractor'''),
('''mobilenet_v2''', '''MobileNetV2FeatureExtractor'''),
('''mobilevit''', '''MobileViTFeatureExtractor'''),
('''nat''', '''ViTFeatureExtractor'''),
('''owlvit''', '''OwlViTFeatureExtractor'''),
('''perceiver''', '''PerceiverFeatureExtractor'''),
('''poolformer''', '''PoolFormerFeatureExtractor'''),
('''regnet''', '''ConvNextFeatureExtractor'''),
('''resnet''', '''ConvNextFeatureExtractor'''),
('''segformer''', '''SegformerFeatureExtractor'''),
('''sew''', '''Wav2Vec2FeatureExtractor'''),
('''sew-d''', '''Wav2Vec2FeatureExtractor'''),
('''speech_to_text''', '''Speech2TextFeatureExtractor'''),
('''speecht5''', '''SpeechT5FeatureExtractor'''),
('''swiftformer''', '''ViTFeatureExtractor'''),
('''swin''', '''ViTFeatureExtractor'''),
('''swinv2''', '''ViTFeatureExtractor'''),
('''table-transformer''', '''DetrFeatureExtractor'''),
('''timesformer''', '''VideoMAEFeatureExtractor'''),
('''tvlt''', '''TvltFeatureExtractor'''),
('''unispeech''', '''Wav2Vec2FeatureExtractor'''),
('''unispeech-sat''', '''Wav2Vec2FeatureExtractor'''),
('''van''', '''ConvNextFeatureExtractor'''),
('''videomae''', '''VideoMAEFeatureExtractor'''),
('''vilt''', '''ViltFeatureExtractor'''),
('''vit''', '''ViTFeatureExtractor'''),
('''vit_mae''', '''ViTFeatureExtractor'''),
('''vit_msn''', '''ViTFeatureExtractor'''),
('''wav2vec2''', '''Wav2Vec2FeatureExtractor'''),
('''wav2vec2-conformer''', '''Wav2Vec2FeatureExtractor'''),
('''wavlm''', '''Wav2Vec2FeatureExtractor'''),
('''whisper''', '''WhisperFeatureExtractor'''),
('''xclip''', '''CLIPFeatureExtractor'''),
('''yolos''', '''YolosFeatureExtractor'''),
]
)
__lowercase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
lowerCamelCase_ : Dict = model_type_to_module_name(_lowercase )
lowerCamelCase_ : Any = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(_lowercase , _lowercase )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(_lowercase , '''__name__''' , _lowercase ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
lowerCamelCase_ : Optional[Any] = importlib.import_module('''transformers''' )
if hasattr(_lowercase , _lowercase ):
return getattr(_lowercase , _lowercase )
return None
def lowercase_ ( _lowercase , _lowercase = None , _lowercase = False , _lowercase = False , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = False , **_lowercase , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = get_file_from_repo(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(_lowercase , encoding='''utf-8''' ) as reader:
return json.load(_lowercase )
class __lowercase :
def __init__(self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(A )
def UpperCAmelCase__ (cls , A , **A ):
lowerCamelCase_ : Optional[Any] = kwargs.pop('''config''' , A )
lowerCamelCase_ : Union[str, Any] = kwargs.pop('''trust_remote_code''' , A )
lowerCamelCase_ : List[Any] = True
lowerCamelCase_, lowerCamelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(A , **A )
lowerCamelCase_ : Tuple = config_dict.get('''feature_extractor_type''' , A )
lowerCamelCase_ : List[Any] = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
lowerCamelCase_ : Optional[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(A , A ):
lowerCamelCase_ : List[str] = AutoConfig.from_pretrained(A , **A )
# It could be in `config.feature_extractor_type``
lowerCamelCase_ : Union[str, Any] = getattr(A , '''feature_extractor_type''' , A )
if hasattr(A , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
lowerCamelCase_ : Optional[int] = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
lowerCamelCase_ : Any = feature_extractor_class_from_name(A )
lowerCamelCase_ : Optional[int] = feature_extractor_auto_map is not None
lowerCamelCase_ : Optional[Any] = feature_extractor_class is not None or type(A ) in FEATURE_EXTRACTOR_MAPPING
lowerCamelCase_ : int = resolve_trust_remote_code(
A , A , A , A )
if has_remote_code and trust_remote_code:
lowerCamelCase_ : Any = get_class_from_dynamic_module(
A , A , **A )
lowerCamelCase_ : List[Any] = kwargs.pop('''code_revision''' , A )
if os.path.isdir(A ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(A , **A )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(A , **A )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(A ) in FEATURE_EXTRACTOR_MAPPING:
lowerCamelCase_ : Optional[int] = FEATURE_EXTRACTOR_MAPPING[type(A )]
return feature_extractor_class.from_dict(A , **A )
raise ValueError(
F"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
F"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def UpperCAmelCase__ (A , A ):
FEATURE_EXTRACTOR_MAPPING.register(A , A )
| 318
| 1
|
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowercase :
lowerCamelCase : Tuple = None
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.feature_extraction_class(**self.feat_extract_dict )
lowerCamelCase_ : Optional[int] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : int = os.path.join(A , '''feat_extract.json''' )
feat_extract_first.to_json_file(A )
lowerCamelCase_ : int = self.feature_extraction_class.from_json_file(A )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase_ : str = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
lowerCamelCase_ : Any = self.feature_extraction_class.from_pretrained(A )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = self.feature_extraction_class()
self.assertIsNotNone(A )
| 318
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
__lowercase : Dict = logging.getLogger(__name__)
@dataclass
class __lowercase :
lowerCamelCase : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
lowerCamelCase : str = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class __lowercase :
lowerCamelCase : Optional[str] = field(default=_lowercase , metadata={"help": "The input training data file (a text file)."} )
lowerCamelCase : Optional[str] = field(
default=_lowercase , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={"help": "Overwrite the cached training and evaluation sets"} )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={"help": "The number of processes to use for the preprocessing."} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
lowerCamelCase : bool = field(
default=_lowercase , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
lowerCamelCase : Optional[int] = field(
default=_lowercase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def UpperCAmelCase__ (self ):
if self.train_file is not None:
lowerCamelCase_ : Optional[Any] = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
lowerCamelCase_ : Optional[Any] = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowercase :
lowerCamelCase : PreTrainedTokenizerBase
lowerCamelCase : Union[bool, str, PaddingStrategy] = True
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
def __call__(self , A ):
lowerCamelCase_ : List[str] = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCamelCase_ : str = [feature.pop(A ) for feature in features]
lowerCamelCase_ : Any = len(A )
lowerCamelCase_ : List[Any] = len(features[0]['''input_ids'''] )
lowerCamelCase_ : Union[str, Any] = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
lowerCamelCase_ : str = list(chain(*A ) )
lowerCamelCase_ : Any = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
lowerCamelCase_ : int = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
lowerCamelCase_ : Tuple = torch.tensor(A , dtype=torch.intaa )
return batch
def lowercase_ ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase_ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : str = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , _lowercase , _lowercase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCamelCase_ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(_lowercase )
datasets.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.set_verbosity(_lowercase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCamelCase_ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCamelCase_ : str = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
lowerCamelCase_ : Optional[Any] = {}
if data_args.train_file is not None:
lowerCamelCase_ : Union[str, Any] = data_args.train_file
if data_args.validation_file is not None:
lowerCamelCase_ : Tuple = data_args.validation_file
lowerCamelCase_ : Optional[Any] = data_args.train_file.split('''.''' )[-1]
lowerCamelCase_ : Dict = load_dataset(
_lowercase , data_files=_lowercase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
lowerCamelCase_ : Optional[Any] = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ : str = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : Any = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCamelCase_ : List[Any] = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=_lowercase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
lowerCamelCase_ : int = [F"""ending{i}""" for i in range(4 )]
lowerCamelCase_ : List[Any] = '''sent1'''
lowerCamelCase_ : Dict = '''sent2'''
if data_args.max_seq_length is None:
lowerCamelCase_ : str = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
lowerCamelCase_ : Optional[int] = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
lowerCamelCase_ : Optional[int] = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(_lowercase ):
lowerCamelCase_ : Tuple = [[context] * 4 for context in examples[context_name]]
lowerCamelCase_ : List[Any] = examples[question_header_name]
lowerCamelCase_ : Optional[Any] = [
[F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(_lowercase )
]
# Flatten out
lowerCamelCase_ : Optional[Any] = list(chain(*_lowercase ) )
lowerCamelCase_ : List[Any] = list(chain(*_lowercase ) )
# Tokenize
lowerCamelCase_ : List[str] = tokenizer(
_lowercase , _lowercase , truncation=_lowercase , max_length=_lowercase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(_lowercase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCamelCase_ : Union[str, Any] = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCamelCase_ : List[str] = min(len(_lowercase ) , data_args.max_train_samples )
lowerCamelCase_ : List[str] = train_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
lowerCamelCase_ : Dict = train_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCamelCase_ : Optional[int] = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCamelCase_ : Optional[int] = min(len(_lowercase ) , data_args.max_eval_samples )
lowerCamelCase_ : Any = eval_dataset.select(range(_lowercase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
lowerCamelCase_ : Tuple = eval_dataset.map(
_lowercase , batched=_lowercase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
lowerCamelCase_ : int = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=_lowercase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(_lowercase ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = eval_predictions
lowerCamelCase_ : Any = np.argmax(_lowercase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
lowerCamelCase_ : Any = Trainer(
model=_lowercase , args=_lowercase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=_lowercase , data_collator=_lowercase , compute_metrics=_lowercase , )
# Training
if training_args.do_train:
lowerCamelCase_ : int = None
if training_args.resume_from_checkpoint is not None:
lowerCamelCase_ : Dict = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCamelCase_ : List[Any] = last_checkpoint
lowerCamelCase_ : Dict = trainer.train(resume_from_checkpoint=_lowercase )
trainer.save_model() # Saves the tokenizer too for easy upload
lowerCamelCase_ : Any = train_result.metrics
lowerCamelCase_ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(_lowercase )
)
lowerCamelCase_ : List[Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''train''' , _lowercase )
trainer.save_metrics('''train''' , _lowercase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ : str = trainer.evaluate()
lowerCamelCase_ : Dict = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(_lowercase )
lowerCamelCase_ : Union[str, Any] = min(_lowercase , len(_lowercase ) )
trainer.log_metrics('''eval''' , _lowercase )
trainer.save_metrics('''eval''' , _lowercase )
lowerCamelCase_ : List[str] = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**_lowercase )
else:
trainer.create_model_card(**_lowercase )
def lowercase_ ( _lowercase ) -> Dict:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 318
| 1
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__lowercase : Dict = 50000
__lowercase : Dict = 5000
__lowercase , __lowercase : Optional[int] = os.path.split(__file__)
__lowercase : Dict = os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json'''))
@get_duration
def lowercase_ ( _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
for i in range(_lowercase ):
lowerCamelCase_ : Dict = dataset[i]
@get_duration
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> Optional[Any]:
'''simple docstring'''
for i in range(0 , len(_lowercase ) , _lowercase ):
lowerCamelCase_ : Union[str, Any] = dataset[i : i + batch_size]
@get_duration
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
with dataset.formatted_as(type=_lowercase ):
for i in range(_lowercase ):
lowerCamelCase_ : List[str] = dataset[i]
@get_duration
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Any:
'''simple docstring'''
with dataset.formatted_as(type=_lowercase ):
for i in range(0 , _lowercase , _lowercase ):
lowerCamelCase_ : Dict = dataset[i : i + batch_size]
def lowercase_ ( ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = {'''num examples''': SPEED_TEST_N_EXAMPLES}
lowerCamelCase_ : Any = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
lowerCamelCase_ : Any = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1_000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
lowerCamelCase_ : Tuple = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
lowerCamelCase_ : Tuple = generate_example_dataset(
os.path.join(_lowercase , '''dataset.arrow''' ) , _lowercase , num_examples=_lowercase , seq_shapes={'''list''': (100,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(_lowercase ) )
lowerCamelCase_ : Dict = func(_lowercase , **_lowercase )
print('''shuffling dataset''' )
lowerCamelCase_ : Tuple = dataset.shuffle()
print('''Second set of iterations (after shuffling''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(_lowercase ) )
lowerCamelCase_ : Optional[Any] = func(
_lowercase , **_lowercase )
with open(_lowercase , '''wb''' ) as f:
f.write(json.dumps(_lowercase ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 318
|
'''simple docstring'''
from __future__ import annotations
import time
__lowercase : List[Any] = list[tuple[int, int]]
__lowercase : List[Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase : Dict = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __lowercase :
def __init__(self , A , A , A , A , A ):
lowerCamelCase_ : Optional[int] = pos_x
lowerCamelCase_ : List[str] = pos_y
lowerCamelCase_ : List[Any] = (pos_y, pos_x)
lowerCamelCase_ : List[str] = goal_x
lowerCamelCase_ : Union[str, Any] = goal_y
lowerCamelCase_ : int = parent
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : Any = Node(start[1] , start[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , A )
lowerCamelCase_ : Union[str, Any] = [self.start]
lowerCamelCase_ : List[str] = False
def UpperCAmelCase__ (self ):
while self.node_queue:
lowerCamelCase_ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
lowerCamelCase_ : List[str] = True
return self.retrace_path(A )
lowerCamelCase_ : str = self.get_successors(A )
for node in successors:
self.node_queue.append(A )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : Dict = []
for action in delta:
lowerCamelCase_ : Any = parent.pos_x + action[1]
lowerCamelCase_ : Dict = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(A , A , self.target.pos_y , self.target.pos_x , A ) )
return successors
def UpperCAmelCase__ (self , A ):
lowerCamelCase_ : int = node
lowerCamelCase_ : str = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase_ : List[Any] = current_node.parent
path.reverse()
return path
class __lowercase :
def __init__(self , A , A ):
lowerCamelCase_ : List[str] = BreadthFirstSearch(A , A )
lowerCamelCase_ : Any = BreadthFirstSearch(A , A )
lowerCamelCase_ : Union[str, Any] = False
def UpperCAmelCase__ (self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
lowerCamelCase_ : List[str] = self.fwd_bfs.node_queue.pop(0 )
lowerCamelCase_ : int = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
lowerCamelCase_ : Optional[Any] = True
return self.retrace_bidirectional_path(
A , A )
lowerCamelCase_ : Optional[int] = current_bwd_node
lowerCamelCase_ : List[str] = current_fwd_node
lowerCamelCase_ : List[str] = {
self.fwd_bfs: self.fwd_bfs.get_successors(A ),
self.bwd_bfs: self.bwd_bfs.get_successors(A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def UpperCAmelCase__ (self , A , A ):
lowerCamelCase_ : List[str] = self.fwd_bfs.retrace_path(A )
lowerCamelCase_ : int = self.bwd_bfs.retrace_path(A )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase_ : Dict = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowercase : List[str] = (0, 0)
__lowercase : List[Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase : Tuple = time.time()
__lowercase : int = BreadthFirstSearch(init, goal)
__lowercase : Dict = bfs.search()
__lowercase : Dict = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
__lowercase : int = time.time()
__lowercase : Optional[Any] = BidirectionalBreadthFirstSearch(init, goal)
__lowercase : Any = bd_bfs.search()
__lowercase : Dict = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
| 318
| 1
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( _lowercase ):
lowerCamelCase : Any = (CMStochasticIterativeScheduler,)
lowerCamelCase : Union[str, Any] = 10
def UpperCAmelCase__ (self , **A ):
lowerCamelCase_ : Dict = {
'''num_train_timesteps''': 2_0_1,
'''sigma_min''': 0.0_02,
'''sigma_max''': 80.0,
}
config.update(**A )
return config
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = 1_0
lowerCamelCase_ : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase_ : List[str] = self.scheduler_classes[0](**A )
scheduler.set_timesteps(A )
lowerCamelCase_ : int = scheduler.timesteps[0]
lowerCamelCase_ : List[str] = scheduler.timesteps[1]
lowerCamelCase_ : Dict = self.dummy_sample
lowerCamelCase_ : int = 0.1 * sample
lowerCamelCase_ : Union[str, Any] = scheduler.step(A , A , A ).prev_sample
lowerCamelCase_ : List[Any] = scheduler.step(A , A , A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ (self ):
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A )
def UpperCAmelCase__ (self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.scheduler_classes[0]
lowerCamelCase_ : Dict = self.get_scheduler_config()
lowerCamelCase_ : Optional[int] = scheduler_class(**A )
lowerCamelCase_ : Optional[Any] = 1
scheduler.set_timesteps(A )
lowerCamelCase_ : str = scheduler.timesteps
lowerCamelCase_ : int = torch.manual_seed(0 )
lowerCamelCase_ : Any = self.dummy_model()
lowerCamelCase_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(A ):
# 1. scale model input
lowerCamelCase_ : List[Any] = scheduler.scale_model_input(A , A )
# 2. predict noise residual
lowerCamelCase_ : List[Any] = model(A , A )
# 3. predict previous sample x_t-1
lowerCamelCase_ : Optional[Any] = scheduler.step(A , A , A , generator=A ).prev_sample
lowerCamelCase_ : Optional[Any] = pred_prev_sample
lowerCamelCase_ : Optional[int] = torch.sum(torch.abs(A ) )
lowerCamelCase_ : str = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 1_92.76_14 ) < 1E-2
assert abs(result_mean.item() - 0.25_10 ) < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase_ : Optional[Any] = self.get_scheduler_config()
lowerCamelCase_ : Dict = scheduler_class(**A )
lowerCamelCase_ : Optional[Any] = [1_0_6, 0]
scheduler.set_timesteps(timesteps=A )
lowerCamelCase_ : int = scheduler.timesteps
lowerCamelCase_ : str = torch.manual_seed(0 )
lowerCamelCase_ : int = self.dummy_model()
lowerCamelCase_ : str = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowerCamelCase_ : Optional[Any] = scheduler.scale_model_input(A , A )
# 2. predict noise residual
lowerCamelCase_ : Dict = model(A , A )
# 3. predict previous sample x_t-1
lowerCamelCase_ : Union[str, Any] = scheduler.step(A , A , A , generator=A ).prev_sample
lowerCamelCase_ : List[Any] = pred_prev_sample
lowerCamelCase_ : Optional[int] = torch.sum(torch.abs(A ) )
lowerCamelCase_ : Union[str, Any] = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1E-2
assert abs(result_mean.item() - 0.45_27 ) < 1E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = self.scheduler_classes[0]
lowerCamelCase_ : Any = self.get_scheduler_config()
lowerCamelCase_ : Optional[int] = scheduler_class(**A )
lowerCamelCase_ : Optional[Any] = [3_9, 3_0, 1_2, 1_5, 0]
with self.assertRaises(A , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.scheduler_classes[0]
lowerCamelCase_ : List[Any] = self.get_scheduler_config()
lowerCamelCase_ : str = scheduler_class(**A )
lowerCamelCase_ : Dict = [3_9, 3_0, 1_2, 1, 0]
lowerCamelCase_ : Any = len(A )
with self.assertRaises(A , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=A , timesteps=A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = self.scheduler_classes[0]
lowerCamelCase_ : List[str] = self.get_scheduler_config()
lowerCamelCase_ : List[str] = scheduler_class(**A )
lowerCamelCase_ : Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A , msg='''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}''' , ):
scheduler.set_timesteps(timesteps=A )
| 318
|
'''simple docstring'''
import numpy as np
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def lowercase_ ( _lowercase ) -> np.ndarray:
'''simple docstring'''
return vector * sigmoid(_lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class __lowercase :
lowerCamelCase : CommonSchedulerState
# setable values
lowerCamelCase : jnp.ndarray
lowerCamelCase : jnp.ndarray
lowerCamelCase : Optional[int] = None
@classmethod
def UpperCAmelCase__ (cls , A , A , A ):
return cls(common=A , init_noise_sigma=A , timesteps=A )
@dataclass
class __lowercase ( _lowercase ):
lowerCamelCase : DDPMSchedulerState
class __lowercase ( _lowercase , _lowercase ):
lowerCamelCase : Tuple = [e.name for e in FlaxKarrasDiffusionSchedulers]
lowerCamelCase : jnp.dtype
@property
def UpperCAmelCase__ (self ):
return True
@register_to_config
def __init__(self , A = 1_0_0_0 , A = 0.00_01 , A = 0.02 , A = "linear" , A = None , A = "fixed_small" , A = True , A = "epsilon" , A = jnp.floataa , ):
lowerCamelCase_ : List[Any] = dtype
def UpperCAmelCase__ (self , A = None ):
if common is None:
lowerCamelCase_ : List[Any] = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowerCamelCase_ : Any = jnp.array(1.0 , dtype=self.dtype )
lowerCamelCase_ : Tuple = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=A , init_noise_sigma=A , timesteps=A , )
def UpperCAmelCase__ (self , A , A , A = None ):
return sample
def UpperCAmelCase__ (self , A , A , A = () ):
lowerCamelCase_ : str = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowerCamelCase_ : Any = (jnp.arange(0 , A ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=A , timesteps=A , )
def UpperCAmelCase__ (self , A , A , A=None , A=None ):
lowerCamelCase_ : Optional[Any] = state.common.alphas_cumprod[t]
lowerCamelCase_ : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowerCamelCase_ : str = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowerCamelCase_ : str = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowerCamelCase_ : Dict = jnp.clip(A , a_min=1E-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowerCamelCase_ : Tuple = jnp.log(jnp.clip(A , a_min=1E-20 ) )
elif variance_type == "fixed_large":
lowerCamelCase_ : Optional[Any] = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowerCamelCase_ : Tuple = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowerCamelCase_ : List[Any] = variance
lowerCamelCase_ : List[Any] = state.common.betas[t]
lowerCamelCase_ : Optional[Any] = (predicted_variance + 1) / 2
lowerCamelCase_ : List[str] = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase__ (self , A , A , A , A , A = None , A = True , ):
lowerCamelCase_ : Any = timestep
if key is None:
lowerCamelCase_ : Optional[int] = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = jnp.split(A , sample.shape[1] , axis=1 )
else:
lowerCamelCase_ : Dict = None
# 1. compute alphas, betas
lowerCamelCase_ : Union[str, Any] = state.common.alphas_cumprod[t]
lowerCamelCase_ : int = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowerCamelCase_ : Tuple = 1 - alpha_prod_t
lowerCamelCase_ : Any = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowerCamelCase_ : List[str] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowerCamelCase_ : Dict = model_output
elif self.config.prediction_type == "v_prediction":
lowerCamelCase_ : Optional[Any] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
''' for the FlaxDDPMScheduler.''' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowerCamelCase_ : int = jnp.clip(A , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_ : Optional[Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowerCamelCase_ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase_ : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowerCamelCase_ : List[str] = jax.random.split(A , num=1 )
lowerCamelCase_ : Union[str, Any] = jax.random.normal(A , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(A , A , predicted_variance=A ) ** 0.5) * noise
lowerCamelCase_ : str = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowerCamelCase_ : Union[str, Any] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=A , state=A )
def UpperCAmelCase__ (self , A , A , A , A , ):
return add_noise_common(state.common , A , A , A )
def UpperCAmelCase__ (self , A , A , A , A , ):
return get_velocity_common(state.common , A , A , A )
def __len__(self ):
return self.config.num_train_timesteps
| 318
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase : int = logging.get_logger(__name__)
def lowercase_ ( _lowercase ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
lowerCamelCase_ : Optional[Any] = [144, 192, 240]
lowerCamelCase_ : Optional[Any] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
lowerCamelCase_ : List[str] = [96, 120, 144]
lowerCamelCase_ : Union[str, Any] = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
lowerCamelCase_ : Any = [64, 80, 96]
lowerCamelCase_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
lowerCamelCase_ : Union[str, Any] = 0.05
lowerCamelCase_ : Union[str, Any] = 2.0
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : Optional[Any] = 512
lowerCamelCase_ : Dict = 16
lowerCamelCase_ : Dict = 21
lowerCamelCase_ : List[Any] = '''pascal-voc-id2label.json'''
else:
lowerCamelCase_ : Any = 1_000
lowerCamelCase_ : Dict = '''imagenet-1k-id2label.json'''
lowerCamelCase_ : Optional[Any] = '''huggingface/label-files'''
lowerCamelCase_ : int = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type='''dataset''' ) , '''r''' ) )
lowerCamelCase_ : List[Any] = {int(_lowercase ): v for k, v in idalabel.items()}
lowerCamelCase_ : List[str] = idalabel
lowerCamelCase_ : str = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowercase , _lowercase=False ) -> List[str]:
'''simple docstring'''
for i in range(1 , 6 ):
if F"""layer_{i}.""" in name:
lowerCamelCase_ : Union[str, Any] = name.replace(F"""layer_{i}.""" , F"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
lowerCamelCase_ : Optional[Any] = name.replace('''conv_1.''' , '''conv_stem.''' )
if ".block." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.block.''' , '''.''' )
if "exp_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''exp_1x1''' , '''expand_1x1''' )
if "red_1x1" in name:
lowerCamelCase_ : int = name.replace('''red_1x1''' , '''reduce_1x1''' )
if ".local_rep.conv_3x3." in name:
lowerCamelCase_ : Dict = name.replace('''.local_rep.conv_3x3.''' , '''.conv_kxk.''' )
if ".local_rep.conv_1x1." in name:
lowerCamelCase_ : Tuple = name.replace('''.local_rep.conv_1x1.''' , '''.conv_1x1.''' )
if ".norm." in name:
lowerCamelCase_ : Dict = name.replace('''.norm.''' , '''.normalization.''' )
if ".conv." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.conv.''' , '''.convolution.''' )
if ".conv_proj." in name:
lowerCamelCase_ : List[str] = name.replace('''.conv_proj.''' , '''.conv_projection.''' )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : Dict = name.replace(F""".{i}.{j}.""" , F""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if F""".{i}.{j}.""" in name:
lowerCamelCase_ : str = name.replace(F""".{i}.{j}.""" , F""".{i}.""" )
if "expand_1x1" in name:
lowerCamelCase_ : str = name.replace('''expand_1x1''' , '''downsampling_layer.expand_1x1''' )
if "conv_3x3" in name:
lowerCamelCase_ : List[str] = name.replace('''conv_3x3''' , '''downsampling_layer.conv_3x3''' )
if "reduce_1x1" in name:
lowerCamelCase_ : Optional[int] = name.replace('''reduce_1x1''' , '''downsampling_layer.reduce_1x1''' )
for i in range(2 , 5 ):
if F""".global_rep.{i}.weight""" in name:
lowerCamelCase_ : Optional[Any] = name.replace(F""".global_rep.{i}.weight""" , '''.layernorm.weight''' )
if F""".global_rep.{i}.bias""" in name:
lowerCamelCase_ : Any = name.replace(F""".global_rep.{i}.bias""" , '''.layernorm.bias''' )
if ".global_rep." in name:
lowerCamelCase_ : List[str] = name.replace('''.global_rep.''' , '''.transformer.''' )
if ".pre_norm_mha.0." in name:
lowerCamelCase_ : List[str] = name.replace('''.pre_norm_mha.0.''' , '''.layernorm_before.''' )
if ".pre_norm_mha.1.out_proj." in name:
lowerCamelCase_ : int = name.replace('''.pre_norm_mha.1.out_proj.''' , '''.attention.output.dense.''' )
if ".pre_norm_ffn.0." in name:
lowerCamelCase_ : Any = name.replace('''.pre_norm_ffn.0.''' , '''.layernorm_after.''' )
if ".pre_norm_ffn.1." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.1.''' , '''.intermediate.dense.''' )
if ".pre_norm_ffn.4." in name:
lowerCamelCase_ : str = name.replace('''.pre_norm_ffn.4.''' , '''.output.dense.''' )
if ".transformer." in name:
lowerCamelCase_ : Optional[int] = name.replace('''.transformer.''' , '''.transformer.layer.''' )
if ".aspp_layer." in name:
lowerCamelCase_ : str = name.replace('''.aspp_layer.''' , '''.''' )
if ".aspp_pool." in name:
lowerCamelCase_ : Union[str, Any] = name.replace('''.aspp_pool.''' , '''.''' )
if "seg_head." in name:
lowerCamelCase_ : int = name.replace('''seg_head.''' , '''segmentation_head.''' )
if "segmentation_head.classifier.classifier." in name:
lowerCamelCase_ : List[Any] = name.replace('''segmentation_head.classifier.classifier.''' , '''segmentation_head.classifier.''' )
if "classifier.fc." in name:
lowerCamelCase_ : Tuple = name.replace('''classifier.fc.''' , '''classifier.''' )
elif (not base_model) and ("segmentation_head." not in name):
lowerCamelCase_ : Tuple = '''mobilevit.''' + name
return name
def lowercase_ ( _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
if base_model:
lowerCamelCase_ : List[str] = ''''''
else:
lowerCamelCase_ : Any = '''mobilevit.'''
for key in orig_state_dict.copy().keys():
lowerCamelCase_ : Dict = orig_state_dict.pop(_lowercase )
if key[:8] == "encoder.":
lowerCamelCase_ : int = key[8:]
if "qkv" in key:
lowerCamelCase_ : List[Any] = key.split('''.''' )
lowerCamelCase_ : Optional[Any] = int(key_split[0][6:] ) - 1
lowerCamelCase_ : Union[str, Any] = int(key_split[3] )
lowerCamelCase_ : Any = model.get_submodule(F"""{model_prefix}encoder.layer.{layer_num}""" )
lowerCamelCase_ : Dict = layer.transformer.layer[transformer_num].attention.attention.all_head_size
lowerCamelCase_ : Optional[Any] = (
F"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
lowerCamelCase_ : List[str] = val[:dim, :]
lowerCamelCase_ : Dict = val[dim : dim * 2, :]
lowerCamelCase_ : Union[str, Any] = val[-dim:, :]
else:
lowerCamelCase_ : List[Any] = val[:dim]
lowerCamelCase_ : Optional[int] = val[dim : dim * 2]
lowerCamelCase_ : int = val[-dim:]
else:
lowerCamelCase_ : int = val
return orig_state_dict
def lowercase_ ( ) -> str:
'''simple docstring'''
lowerCamelCase_ : Union[str, Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase_ : Optional[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase=False ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : Tuple = get_mobilevit_config(_lowercase )
# load original state_dict
lowerCamelCase_ : int = torch.load(_lowercase , map_location='''cpu''' )
# load 🤗 model
if mobilevit_name.startswith('''deeplabv3_''' ):
lowerCamelCase_ : int = MobileViTForSemanticSegmentation(_lowercase ).eval()
else:
lowerCamelCase_ : int = MobileViTForImageClassification(_lowercase ).eval()
lowerCamelCase_ : Optional[Any] = convert_state_dict(_lowercase , _lowercase )
model.load_state_dict(_lowercase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
lowerCamelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors='''pt''' )
lowerCamelCase_ : Optional[int] = model(**_lowercase )
lowerCamelCase_ : List[str] = outputs.logits
if mobilevit_name.startswith('''deeplabv3_''' ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
lowerCamelCase_ : Union[str, Any] = torch.tensor(
[
[[6.20_65, 6.12_92, 6.20_70], [6.10_79, 6.12_54, 6.17_47], [6.00_42, 6.10_71, 6.10_34]],
[[-6.92_53, -6.86_53, -7.03_98], [-7.32_18, -7.39_83, -7.36_70], [-7.19_61, -7.24_82, -7.15_69]],
[[-4.47_23, -4.43_48, -4.37_69], [-5.36_29, -5.46_32, -5.45_98], [-5.15_87, -5.34_02, -5.50_59]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
lowerCamelCase_ : Dict = torch.tensor(
[
[[5.44_49, 5.57_33, 5.63_14], [5.18_15, 5.39_30, 5.59_63], [5.16_56, 5.43_33, 5.48_53]],
[[-9.44_23, -9.77_66, -9.67_14], [-9.15_81, -9.57_20, -9.55_19], [-9.10_06, -9.64_58, -9.57_03]],
[[-7.77_21, -7.37_16, -7.15_83], [-8.45_99, -8.06_24, -7.79_44], [-8.41_72, -7.83_66, -7.50_25]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
lowerCamelCase_ : List[str] = torch.tensor(
[
[[6.98_11, 6.97_43, 7.31_23], [7.17_77, 7.19_31, 7.39_38], [7.56_33, 7.80_50, 7.89_01]],
[[-10.55_36, -10.23_32, -10.29_24], [-10.23_36, -9.86_24, -9.59_64], [-10.88_40, -10.81_58, -10.66_59]],
[[-3.49_38, -3.06_31, -2.86_20], [-3.42_05, -2.81_35, -2.68_75], [-3.41_79, -2.79_45, -2.87_50]],
] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1e-4 )
else:
assert logits.shape == (1, 1_000)
if mobilevit_name == "mobilevit_s":
lowerCamelCase_ : Optional[Any] = torch.tensor([-0.98_66, 0.23_92, -1.12_41] )
elif mobilevit_name == "mobilevit_xs":
lowerCamelCase_ : Tuple = torch.tensor([-2.47_61, -0.93_99, -1.95_87] )
elif mobilevit_name == "mobilevit_xxs":
lowerCamelCase_ : List[Any] = torch.tensor([-1.93_64, -1.23_27, -0.46_53] )
else:
raise ValueError(F"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , _lowercase , atol=1e-4 )
Path(_lowercase ).mkdir(exist_ok=_lowercase )
print(F"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowercase )
if push_to_hub:
lowerCamelCase_ : str = {
'''mobilevit_s''': '''mobilevit-small''',
'''mobilevit_xs''': '''mobilevit-x-small''',
'''mobilevit_xxs''': '''mobilevit-xx-small''',
'''deeplabv3_mobilevit_s''': '''deeplabv3-mobilevit-small''',
'''deeplabv3_mobilevit_xs''': '''deeplabv3-mobilevit-x-small''',
'''deeplabv3_mobilevit_xxs''': '''deeplabv3-mobilevit-xx-small''',
}
print('''Pushing to the hub...''' )
lowerCamelCase_ : int = model_mapping[mobilevit_name]
image_processor.push_to_hub(_lowercase , organization='''apple''' )
model.push_to_hub(_lowercase , organization='''apple''' )
if __name__ == "__main__":
__lowercase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--mobilevit_name''',
default='''mobilevit_s''',
type=str,
help=(
'''Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','''
''' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowercase : Optional[int] = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 318
| 1
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowercase ( _lowercase , unittest.TestCase ):
lowerCamelCase : Optional[int] = DanceDiffusionPipeline
lowerCamelCase : Dict = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowerCamelCase : str = PipelineTesterMixin.required_optional_params - {
"callback",
"latents",
"callback_steps",
"output_type",
"num_images_per_prompt",
}
lowerCamelCase : Union[str, Any] = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowerCamelCase : int = False
lowerCamelCase : str = False
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : int = UNetaDModel(
block_out_channels=(3_2, 3_2, 6_4) , extra_in_channels=1_6 , sample_size=5_1_2 , sample_rate=1_6_0_0_0 , in_channels=2 , out_channels=2 , flip_sin_to_cos=A , use_timestep_embedding=A , time_embedding_type='''fourier''' , mid_block_type='''UNetMidBlock1D''' , down_block_types=('''DownBlock1DNoSkip''', '''DownBlock1D''', '''AttnDownBlock1D''') , up_block_types=('''AttnUpBlock1D''', '''UpBlock1D''', '''UpBlock1DNoSkip''') , )
lowerCamelCase_ : int = IPNDMScheduler()
lowerCamelCase_ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def UpperCAmelCase__ (self , A , A=0 ):
if str(A ).startswith('''mps''' ):
lowerCamelCase_ : Optional[int] = torch.manual_seed(A )
else:
lowerCamelCase_ : List[Any] = torch.Generator(device=A ).manual_seed(A )
lowerCamelCase_ : List[str] = {
'''batch_size''': 1,
'''generator''': generator,
'''num_inference_steps''': 4,
}
return inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : List[str] = self.get_dummy_components()
lowerCamelCase_ : Tuple = DanceDiffusionPipeline(**A )
lowerCamelCase_ : List[str] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Any = self.get_dummy_inputs(A )
lowerCamelCase_ : Dict = pipe(**A )
lowerCamelCase_ : int = output.audios
lowerCamelCase_ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCamelCase_ : Union[str, Any] = np.array([-0.72_65, 1.00_00, -0.83_88, 0.11_75, 0.94_98, -1.00_00] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def UpperCAmelCase__ (self ):
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase__ (self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def UpperCAmelCase__ (self ):
return super().test_save_load_optional_components()
@skip_mps
def UpperCAmelCase__ (self ):
return super().test_attention_slicing_forward_pass()
def UpperCAmelCase__ (self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = torch_device
lowerCamelCase_ : int = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' )
lowerCamelCase_ : Optional[Any] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : Any = pipe(generator=A , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
lowerCamelCase_ : int = output.audios
lowerCamelCase_ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCamelCase_ : str = np.array([-0.01_92, -0.02_31, -0.03_18, -0.00_59, 0.00_02, -0.00_20] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = torch_device
lowerCamelCase_ : str = DanceDiffusionPipeline.from_pretrained('''harmonai/maestro-150k''' , torch_dtype=torch.floataa )
lowerCamelCase_ : List[str] = pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase_ : List[str] = torch.manual_seed(0 )
lowerCamelCase_ : str = pipe(generator=A , num_inference_steps=1_0_0 , audio_length_in_s=4.0_96 )
lowerCamelCase_ : Optional[Any] = output.audios
lowerCamelCase_ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCamelCase_ : List[str] = np.array([-0.03_67, -0.04_88, -0.07_71, -0.05_25, -0.04_44, -0.03_41] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 318
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase ) -> list[int]: # This function is recursive
'''simple docstring'''
lowerCamelCase_ : Tuple = len(_lowercase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
lowerCamelCase_ : Union[str, Any] = array[0]
lowerCamelCase_ : Optional[int] = False
lowerCamelCase_ : List[Any] = 1
lowerCamelCase_ : list[int] = []
while not is_found and i < array_length:
if array[i] < pivot:
lowerCamelCase_ : Optional[int] = True
lowerCamelCase_ : List[str] = [element for element in array[i:] if element >= array[i]]
lowerCamelCase_ : List[str] = longest_subsequence(_lowercase )
if len(_lowercase ) > len(_lowercase ):
lowerCamelCase_ : Any = temp_array
else:
i += 1
lowerCamelCase_ : Optional[int] = [element for element in array[1:] if element >= pivot]
lowerCamelCase_ : str = [pivot, *longest_subsequence(_lowercase )]
if len(_lowercase ) > len(_lowercase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
| 1
|
'''simple docstring'''
from __future__ import annotations
def lowercase_ ( _lowercase , _lowercase ) -> bool:
'''simple docstring'''
lowerCamelCase_ : List[Any] = get_failure_array(_lowercase )
# 2) Step through text searching for pattern
lowerCamelCase_, lowerCamelCase_ : str = 0, 0 # index into text, pattern
while i < len(_lowercase ):
if pattern[j] == text[i]:
if j == (len(_lowercase ) - 1):
return True
j += 1
# if this is a prefix in our pattern
# just go back far enough to continue
elif j > 0:
lowerCamelCase_ : Union[str, Any] = failure[j - 1]
continue
i += 1
return False
def lowercase_ ( _lowercase ) -> list[int]:
'''simple docstring'''
lowerCamelCase_ : int = [0]
lowerCamelCase_ : Tuple = 0
lowerCamelCase_ : str = 1
while j < len(_lowercase ):
if pattern[i] == pattern[j]:
i += 1
elif i > 0:
lowerCamelCase_ : Optional[Any] = failure[i - 1]
continue
j += 1
failure.append(_lowercase )
return failure
if __name__ == "__main__":
# Test 1)
__lowercase : List[Any] = '''abc1abc12'''
__lowercase : List[str] = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
__lowercase : List[Any] = '''alskfjaldsk23adsfabcabc'''
assert kmp(pattern, texta) and not kmp(pattern, texta)
# Test 2)
__lowercase : Any = '''ABABX'''
__lowercase : Dict = '''ABABZABABYABABX'''
assert kmp(pattern, text)
# Test 3)
__lowercase : str = '''AAAB'''
__lowercase : Optional[int] = '''ABAAAAAB'''
assert kmp(pattern, text)
# Test 4)
__lowercase : List[str] = '''abcdabcy'''
__lowercase : Optional[Any] = '''abcxabcdabxabcdabcdabcy'''
assert kmp(pattern, text)
# Test 5)
__lowercase : str = '''aabaabaaa'''
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 318
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 318
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__lowercase : Dict = logging.get_logger(__name__)
class __lowercase ( _lowercase ):
def __init__(self , *A , **A ):
warnings.warn(
'''The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use OwlViTImageProcessor instead.''' , A , )
super().__init__(*A , **A )
| 318
|
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
__lowercase : Optional[Any] = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
__lowercase : Any = BASE_URL + '''/user'''
# https://github.com/settings/tokens
__lowercase : Any = os.environ.get('''USER_TOKEN''', '''''')
def lowercase_ ( _lowercase ) -> dict[Any, Any]:
'''simple docstring'''
lowerCamelCase_ : str = {
'''Authorization''': F"""token {auth_token}""",
'''Accept''': '''application/vnd.github.v3+json''',
}
return requests.get(_lowercase , headers=_lowercase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 318
| 1
|
'''simple docstring'''
from math import loga
def lowercase_ ( _lowercase ) -> int:
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(_lowercase , _lowercase ):
raise TypeError('''Input value must be a \'int\' type''' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 318
|
'''simple docstring'''
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def lowercase_ ( _lowercase ) -> List[Any]:
'''simple docstring'''
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class __lowercase ( nn.Module ):
def __init__(self , A , A ):
super().__init__()
lowerCamelCase_ : Tuple = module
lowerCamelCase_ : Any = nn.Sequential(
nn.Linear(module.in_features , A , bias=A ) , nn.Linear(A , module.out_features , bias=A ) , )
lowerCamelCase_ : Optional[Any] = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=A )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def UpperCAmelCase__ (self , A , *A , **A ):
return self.module(A , *A , **A ) + self.adapter(A )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCamelCase : Tuple = "bigscience/bloom-1b7"
# Constant values
lowerCamelCase : List[Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCamelCase : int = "Hello my name is"
lowerCamelCase : Tuple = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCamelCase : Optional[int] = 10
def UpperCAmelCase__ (self ):
# Models and tokenizer
lowerCamelCase_ : Optional[Any] = AutoTokenizer.from_pretrained(self.model_name )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# Models and tokenizer
lowerCamelCase_ : Tuple = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='''auto''' )
lowerCamelCase_ : List[str] = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_abit.config
self.assertTrue(hasattr(A , '''quantization_config''' ) )
lowerCamelCase_ : Tuple = config.to_dict()
lowerCamelCase_ : Optional[Any] = config.to_diff_dict()
lowerCamelCase_ : Any = config.to_json_string()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
lowerCamelCase_ : str = self.model_fpaa.get_memory_footprint()
lowerCamelCase_ : List[str] = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
lowerCamelCase_ : Optional[int] = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def UpperCAmelCase__ (self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(A , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : Union[str, Any] = self.model_abit.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = BitsAndBytesConfig()
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : str = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : int = model_abit_from_config.generate(
input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = BitsAndBytesConfig()
with self.assertRaises(A ):
lowerCamelCase_ : List[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=A , load_in_abit=A , device_map='''auto''' , bnb_abit_quant_type='''nf4''' , )
def UpperCAmelCase__ (self ):
with self.assertRaises(A ):
# Tries with `str`
self.model_abit.to('''cpu''' )
with self.assertRaises(A ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.to(torch.device('''cuda:0''' ) )
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(A ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' )
lowerCamelCase_ : List[Any] = self.model_fpaa.to(torch.floataa )
lowerCamelCase_ : Tuple = self.model_fpaa.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
# Check this does not throw an error
lowerCamelCase_ : str = self.model_fpaa.to('''cpu''' )
# Check this does not throw an error
lowerCamelCase_ : List[Any] = self.model_fpaa.half()
# Check this does not throw an error
lowerCamelCase_ : List[str] = self.model_fpaa.float()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = AutoModelForSeqaSeqLM.from_pretrained('''t5-small''' , load_in_abit=A , device_map='''auto''' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
@classmethod
def UpperCAmelCase__ (cls ):
lowerCamelCase_ : List[Any] = '''t5-small'''
lowerCamelCase_ : Optional[Any] = '''google/flan-t5-small''' # flan-t5 uses dense-act instead of dense-relu-dense
lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(cls.model_name )
lowerCamelCase_ : Optional[Any] = '''Translate in German: Hello, my dog is cute'''
def UpperCAmelCase__ (self ):
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from transformers import TaForConditionalGeneration
lowerCamelCase_ : Any = TaForConditionalGeneration._keep_in_fpaa_modules
lowerCamelCase_ : List[Any] = None
# test with `t5-small`
lowerCamelCase_ : int = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : str = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[Any] = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[Any] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : int = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Optional[int] = model.generate(**A )
lowerCamelCase_ : Any = modules
def UpperCAmelCase__ (self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
lowerCamelCase_ : Tuple = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
lowerCamelCase_ : Optional[Any] = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Dict = model.generate(**A )
# test with `flan-t5-small`
lowerCamelCase_ : List[str] = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=A , device_map='''auto''' )
lowerCamelCase_ : Dict = self.tokenizer(self.input_text , return_tensors='''pt''' ).to(0 )
lowerCamelCase_ : Tuple = model.generate(**A )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
# model_name
lowerCamelCase_ : Optional[int] = '''bigscience/bloom-560m'''
lowerCamelCase_ : Optional[int] = '''t5-small'''
# Different types of model
lowerCamelCase_ : List[Any] = AutoModel.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Sequence classification model
lowerCamelCase_ : Dict = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=A , device_map='''auto''' )
# CausalLM model
lowerCamelCase_ : int = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A , device_map='''auto''' )
# Seq2seq model
lowerCamelCase_ : int = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=A , device_map='''auto''' )
def UpperCAmelCase__ (self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : int = pipeline(
'''text-generation''' , model=self.model_name , model_kwargs={'''device_map''': '''auto''', '''load_in_4bit''': True, '''torch_dtype''': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
lowerCamelCase_ : List[str] = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['''generated_text'''] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
super().setUp()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=A , device_map='''balanced''' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
lowerCamelCase_ : Any = self.tokenizer(self.input_text , return_tensors='''pt''' )
# Second real batch
lowerCamelCase_ : Any = model_parallel.generate(input_ids=encoded_input['''input_ids'''].to(0 ) , max_new_tokens=1_0 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=A ) , self.EXPECTED_OUTPUTS )
class __lowercase ( _lowercase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = '''facebook/opt-350m'''
super().setUp()
def UpperCAmelCase__ (self ):
if version.parse(importlib.metadata.version('''bitsandbytes''' ) ) < version.parse('''0.37.0''' ):
return
# Step 1: freeze all parameters
lowerCamelCase_ : Any = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=A )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
lowerCamelCase_ : List[str] = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
lowerCamelCase_ : Optional[int] = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(A ) ):
lowerCamelCase_ : Dict = LoRALayer(module.q_proj , rank=1_6 )
lowerCamelCase_ : str = LoRALayer(module.k_proj , rank=1_6 )
lowerCamelCase_ : int = LoRALayer(module.v_proj , rank=1_6 )
# Step 3: dummy batch
lowerCamelCase_ : Union[str, Any] = self.tokenizer('''Test batch ''' , return_tensors='''pt''' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
lowerCamelCase_ : Optional[int] = model.forward(**A )
out.logits.norm().backward()
for module in model.modules():
if isinstance(A , A ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(A , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[Any] = "gpt2-xl"
lowerCamelCase : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 318
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __lowercase ( _lowercase ):
lowerCamelCase : torch.FloatTensor
class __lowercase ( _lowercase , _lowercase ):
@register_to_config
def __init__(self , A = 6_5_5_3_6 , A = None , A = 2 , A = 2 , A = 0 , A = "fourier" , A = True , A = False , A = 0.0 , A = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , A = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , A = "UNetMidBlock1D" , A = None , A = (3_2, 3_2, 6_4) , A = None , A = 8 , A = 1 , A = False , ):
super().__init__()
lowerCamelCase_ : List[str] = sample_size
# time
if time_embedding_type == "fourier":
lowerCamelCase_ : List[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=A , log=A , flip_sin_to_cos=A )
lowerCamelCase_ : List[Any] = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCamelCase_ : List[str] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=A , downscale_freq_shift=A )
lowerCamelCase_ : Union[str, Any] = block_out_channels[0]
if use_timestep_embedding:
lowerCamelCase_ : List[Any] = block_out_channels[0] * 4
lowerCamelCase_ : Dict = TimestepEmbedding(
in_channels=A , time_embed_dim=A , act_fn=A , out_dim=block_out_channels[0] , )
lowerCamelCase_ : List[Any] = nn.ModuleList([] )
lowerCamelCase_ : Optional[Any] = None
lowerCamelCase_ : Any = nn.ModuleList([] )
lowerCamelCase_ : str = None
# down
lowerCamelCase_ : int = in_channels
for i, down_block_type in enumerate(A ):
lowerCamelCase_ : List[Any] = output_channel
lowerCamelCase_ : Optional[Any] = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCamelCase_ : Optional[int] = i == len(A ) - 1
lowerCamelCase_ : Optional[int] = get_down_block(
A , num_layers=A , in_channels=A , out_channels=A , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(A )
# mid
lowerCamelCase_ : List[str] = get_mid_block(
A , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=A , add_downsample=A , )
# up
lowerCamelCase_ : Union[str, Any] = list(reversed(A ) )
lowerCamelCase_ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
lowerCamelCase_ : Optional[int] = out_channels
else:
lowerCamelCase_ : Dict = block_out_channels[0]
for i, up_block_type in enumerate(A ):
lowerCamelCase_ : Tuple = output_channel
lowerCamelCase_ : str = (
reversed_block_out_channels[i + 1] if i < len(A ) - 1 else final_upsample_channels
)
lowerCamelCase_ : str = i == len(A ) - 1
lowerCamelCase_ : str = get_up_block(
A , num_layers=A , in_channels=A , out_channels=A , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(A )
lowerCamelCase_ : int = output_channel
# out
lowerCamelCase_ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
lowerCamelCase_ : str = get_out_block(
out_block_type=A , num_groups_out=A , embed_dim=block_out_channels[0] , out_channels=A , act_fn=A , fc_dim=block_out_channels[-1] // 4 , )
def UpperCAmelCase__ (self , A , A , A = True , ):
lowerCamelCase_ : Any = timestep
if not torch.is_tensor(A ):
lowerCamelCase_ : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
lowerCamelCase_ : int = timesteps[None].to(sample.device )
lowerCamelCase_ : Optional[Any] = self.time_proj(A )
if self.config.use_timestep_embedding:
lowerCamelCase_ : Optional[Any] = self.time_mlp(A )
else:
lowerCamelCase_ : str = timestep_embed[..., None]
lowerCamelCase_ : List[str] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCamelCase_ : Tuple = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCamelCase_ : Union[str, Any] = ()
for downsample_block in self.down_blocks:
lowerCamelCase_, lowerCamelCase_ : Dict = downsample_block(hidden_states=A , temb=A )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCamelCase_ : Tuple = self.mid_block(A , A )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCamelCase_ : str = down_block_res_samples[-1:]
lowerCamelCase_ : Union[str, Any] = down_block_res_samples[:-1]
lowerCamelCase_ : Dict = upsample_block(A , res_hidden_states_tuple=A , temb=A )
# 5. post-process
if self.out_block:
lowerCamelCase_ : List[str] = self.out_block(A , A )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=A )
| 318
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__lowercase : List[Any] = None
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Any = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__lowercase : Optional[Any] = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__lowercase : List[str] = {
'''google/rembert''': 256,
}
__lowercase : List[Any] = '''▁'''
class __lowercase ( _lowercase ):
lowerCamelCase : int = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] = RemBertTokenizer
def __init__(self , A=None , A=None , A=True , A=True , A=False , A="[CLS]" , A="[SEP]" , A="<unk>" , A="[SEP]" , A="<pad>" , A="[CLS]" , A="[MASK]" , **A , ):
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
A , tokenizer_file=A , do_lower_case=A , remove_space=A , keep_accents=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , )
lowerCamelCase_ : Any = do_lower_case
lowerCamelCase_ : Union[str, Any] = remove_space
lowerCamelCase_ : Optional[Any] = keep_accents
lowerCamelCase_ : str = vocab_file
lowerCamelCase_ : str = False if not self.vocab_file else True
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : Union[str, Any] = [self.sep_token_id]
lowerCamelCase_ : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__ (self , A , A = None , A = False ):
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1]
return [1] + ([0] * len(A )) + [1]
def UpperCAmelCase__ (self , A , A = None ):
lowerCamelCase_ : int = [self.sep_token_id]
lowerCamelCase_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ (self , A , A = None ):
if not os.path.isdir(A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(A ) )
return
lowerCamelCase_ : Dict = os.path.join(
A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 318
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : Optional[Any] = logging.get_logger(__name__)
__lowercase : Union[str, Any] = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __lowercase ( _lowercase ):
lowerCamelCase : List[Any] = "transfo-xl"
lowerCamelCase : Optional[int] = ["mems"]
lowerCamelCase : str = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__(self , A=2_6_7_7_3_5 , A=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] , A=1_0_2_4 , A=1_0_2_4 , A=1_6 , A=6_4 , A=4_0_9_6 , A=4 , A=False , A=1_8 , A=1_6_0_0 , A=1_0_0_0 , A=True , A=True , A=0 , A=-1 , A=True , A=0.1 , A=0.0 , A=True , A="normal" , A=0.01 , A=0.01 , A=0.02 , A=1E-5 , A=0 , **A , ):
lowerCamelCase_ : List[Any] = vocab_size
lowerCamelCase_ : Dict = []
self.cutoffs.extend(A )
if proj_share_all_but_first:
lowerCamelCase_ : Any = [False] + [True] * len(self.cutoffs )
else:
lowerCamelCase_ : Optional[Any] = [False] + [False] * len(self.cutoffs )
lowerCamelCase_ : Optional[int] = d_model
lowerCamelCase_ : int = d_embed
lowerCamelCase_ : Optional[Any] = d_head
lowerCamelCase_ : Any = d_inner
lowerCamelCase_ : Tuple = div_val
lowerCamelCase_ : Union[str, Any] = pre_lnorm
lowerCamelCase_ : Any = n_layer
lowerCamelCase_ : Tuple = n_head
lowerCamelCase_ : List[str] = mem_len
lowerCamelCase_ : Dict = same_length
lowerCamelCase_ : Optional[Any] = attn_type
lowerCamelCase_ : Tuple = clamp_len
lowerCamelCase_ : List[Any] = sample_softmax
lowerCamelCase_ : str = adaptive
lowerCamelCase_ : Tuple = dropout
lowerCamelCase_ : Dict = dropatt
lowerCamelCase_ : Optional[int] = untie_r
lowerCamelCase_ : int = init
lowerCamelCase_ : List[str] = init_range
lowerCamelCase_ : List[Any] = proj_init_std
lowerCamelCase_ : Any = init_std
lowerCamelCase_ : List[Any] = layer_norm_epsilon
super().__init__(eos_token_id=A , **A )
@property
def UpperCAmelCase__ (self ):
# Message copied from Transformer-XL documentation
logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def UpperCAmelCase__ (self , A ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 318
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[str] = tempfile.mkdtemp()
lowerCamelCase_ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''的''',
'''价''',
'''格''',
'''是''',
'''15''',
'''便''',
'''alex''',
'''##andra''',
''',''',
'''。''',
'''-''',
'''t''',
'''shirt''',
]
lowerCamelCase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
lowerCamelCase_ : Tuple = {
'''do_resize''': True,
'''size''': {'''height''': 2_2_4, '''width''': 2_2_4},
'''do_center_crop''': True,
'''crop_size''': {'''height''': 1_8, '''width''': 1_8},
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
'''do_convert_rgb''': True,
}
lowerCamelCase_ : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A , A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizer.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self , **A ):
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def UpperCAmelCase__ (self ):
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
lowerCamelCase_ : Optional[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = self.get_tokenizer()
lowerCamelCase_ : List[Any] = self.get_rust_tokenizer()
lowerCamelCase_ : List[Any] = self.get_image_processor()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Any = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
lowerCamelCase_ : List[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase_ : List[str] = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' )
lowerCamelCase_ : Dict = self.get_image_processor(do_normalize=A )
lowerCamelCase_ : Tuple = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : List[str] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = self.prepare_image_inputs()
lowerCamelCase_ : List[Any] = image_processor(A , return_tensors='''np''' )
lowerCamelCase_ : Optional[int] = processor(images=A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.get_image_processor()
lowerCamelCase_ : Union[str, Any] = self.get_tokenizer()
lowerCamelCase_ : str = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : int = processor(text=A )
lowerCamelCase_ : Dict = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Union[str, Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Any = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : List[Any] = self.prepare_image_inputs()
lowerCamelCase_ : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[int] = self.get_image_processor()
lowerCamelCase_ : int = self.get_tokenizer()
lowerCamelCase_ : Any = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase_ : Union[str, Any] = processor.batch_decode(A )
lowerCamelCase_ : Any = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = self.get_image_processor()
lowerCamelCase_ : Optional[int] = self.get_tokenizer()
lowerCamelCase_ : Optional[Any] = ChineseCLIPProcessor(tokenizer=A , image_processor=A )
lowerCamelCase_ : int = '''Alexandra,T-shirt的价格是15便士。'''
lowerCamelCase_ : str = self.prepare_image_inputs()
lowerCamelCase_ : int = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 318
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..utils import _LazyModule
__lowercase : Union[str, Any] = {
'''config''': [
'''EXTERNAL_DATA_FORMAT_SIZE_LIMIT''',
'''OnnxConfig''',
'''OnnxConfigWithPast''',
'''OnnxSeq2SeqConfigWithPast''',
'''PatchingSpec''',
],
'''convert''': ['''export''', '''validate_model_outputs'''],
'''features''': ['''FeaturesManager'''],
'''utils''': ['''ParameterFormat''', '''compute_serialized_parameters_size'''],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 318
|
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( _lowercase , _lowercase , _lowercase ) -> jnp.ndarray:
'''simple docstring'''
lowerCamelCase_ : Optional[int] = jnp.zeros_like(_lowercase )
lowerCamelCase_ : Any = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCamelCase_ : List[str] = shifted_input_ids.at[:, 0].set(_lowercase )
lowerCamelCase_ : Tuple = jnp.where(shifted_input_ids == -100 , _lowercase , _lowercase )
return shifted_input_ids
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Dict = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Tuple = "mt5"
lowerCamelCase : int = MTaConfig
class __lowercase ( _lowercase ):
lowerCamelCase : Optional[int] = "mt5"
lowerCamelCase : Union[str, Any] = MTaConfig
| 318
| 1
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.