code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; previously this name was immediately clobbered by the archive map below.
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig(PretrainedConfig):
    """Configuration for a WavLM model.

    Stores hyper-parameters for the feature extractor (conv stack), the
    transformer encoder, SpecAugment masking, codevector quantization,
    CTC loss, the adapter, and the sequence-classification / x-vector heads.
    The original block declared every parameter as the same name `__A`
    (a SyntaxError) while the body referenced the real attribute names;
    the real signature is restored here.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv descriptors must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the conv feature extractor.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 99 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
# Module logger; previously assigned to the same throwaway name as the map below.
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted config.json.
DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
    """microsoft/deberta-v2-xlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
    ),
    """microsoft/deberta-v2-xxlarge-mnli""": (
        """https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
    ),
}
class DebertaV2Config(PretrainedConfig):
    """Configuration for a DeBERTa-v2 model.

    The original block declared every __init__ parameter as the same name
    `__UpperCAmelCase` (a SyntaxError); the real parameter names, matching
    the attribute assignments in the body, are restored here.
    """

    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility: accept a "|"-separated string for pos_att_type.
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        # pooler_hidden_size defaults to hidden_size unless explicitly passed.
        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    """ONNX export configuration for DeBERTa-v2 (restored from obfuscated names)."""

    @property
    def inputs(self):
        # Dynamic axes depend on the task: multiple-choice adds a "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        # token_type_ids only exist when the config actually uses token types.
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )

    @property
    def default_onnx_opset(self):
        # Minimum ONNX opset known to support this architecture.
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size=-1,
        seq_length=-1,
        num_choices=-1,
        is_pair=False,
        framework=None,
        num_channels=3,
        image_width=40,
        image_height=40,
        tokenizer=None,
    ):
        """Build dummy inputs for export, dropping token_type_ids when unused."""
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
| 93 | 0 |
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    """Build the `accelerate config` argument parser with its subcommands.

    Restored name: `main()` below calls `get_config_parser()`, but the
    function had been renamed to `__a`. The parent parser must be created
    with add_help/allow_abbrev disabled (it is only used via `parents=`).
    """
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
def main():
    """Entry point: parse CLI args and dispatch to the selected subcommand."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    # Subcommands attach a `func` attribute; without one there is nothing to run.
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 704 | '''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public symbols it exports.
# The obfuscated version rebound a single variable for every list (so the
# dict was discarded) and then passed an undefined `_import_structure` to
# _LazyModule; the canonical pattern is restored here.
_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE: flax models live in modeling_flax_wav2vec2, not the TF module.
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy frameworks load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 461 | 0 |
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table):
    """Doolittle LU decomposition of a square matrix.

    Returns (lower, upper) with lower unit-diagonal such that
    lower @ upper == table. Raises ValueError for non-square input and
    ArithmeticError when a zero pivot makes the decomposition impossible.
    The obfuscated version had lost the (rows, columns) unpacking and all
    working-array names; the standard algorithm is restored here.
    """
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            '''\'table\' has to be of square shaped array but got a '''
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        # Fill row i of `lower` strictly below the diagonal.
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        # Fill row i of `upper` on and above the diagonal.
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 226 |
def is_pentagonal(n) -> bool:
    """Return True when n is a pentagonal number (inverse of P(k)=k(3k-1)/2)."""
    root = (1 + 24 * n) ** 0.5
    # n is pentagonal iff (1 + sqrt(1+24n)) / 6 is an integer.
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    """Project Euler 44: find D = |P_j - P_k| where both sum and difference
    of the pentagonal pair are pentagonal; returns -1 if no pair exists
    among the first `limit` pentagonal numbers.

    Restored names: both functions were renamed to `A_`, so the call to
    `is_pentagonal` below referenced an undefined name.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 326 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint to a PyTorch state dict on disk.

    Restored name: the CLI block below calls
    `convert_tf_checkpoint_to_pytorch`, but the function had been renamed
    to `__magic_name__` and its three parameters collapsed to `_A`.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 265 |
'''simple docstring'''
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
# Precompiled pattern stripping English articles during answer normalization.
ARTICLES_REGEX = re.compile(r'''\b(a|an|the)\b''', re.UNICODE)

# Parsed CLI options; populated in the __main__ guard before main() runs.
OPTS = None
def parse_args():
    """Parse CLI options for the SQuAD 2.0 evaluation script."""
    parser = argparse.ArgumentParser("""Official evaluation script for SQuAD version 2.0.""")
    parser.add_argument("""data_file""", metavar="""data.json""", help="""Input data JSON file.""")
    parser.add_argument("""pred_file""", metavar="""pred.json""", help="""Model predictions.""")
    parser.add_argument(
        """--out-file""", """-o""", metavar="""eval.json""", help="""Write accuracy metrics to file (default is stdout).""")
    parser.add_argument(
        """--na-prob-file""", """-n""", metavar="""na_prob.json""", help="""Model estimates of probability of no answer.""")
    parser.add_argument(
        """--na-prob-thresh""", """-t""", type=float, default=1.0, help="""Predict \"\" if no-answer probability exceeds this (default = 1.0).""", )
    parser.add_argument(
        """--out-image-dir""", """-p""", metavar="""out_images""", default=None, help="""Save precision-recall curves to directory.""")
    parser.add_argument("""--verbose""", """-v""", action="""store_true""")
    # With no arguments at all, show usage instead of a parse error.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()
def make_qid_to_has_ans(dataset):
    """Map each question id to True/False for whether it has a gold answer.

    The obfuscated version assigned `bool(...)` to a throwaway variable,
    losing the dict insertion; the dict write is restored.
    """
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["""answers"""]["""text"""])
    return qid_to_has_ans
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        # ARTICLES_REGEX is the module-level compiled pattern \b(a|an|the)\b.
        return ARTICLES_REGEX.sub(""" """, text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    # Order matters: lowercase, strip punctuation, drop articles, collapse spaces.
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(s):
    """Tokenize a string by whitespace after SQuAD normalization; [] for empty/None."""
    if not s:
        return []
    return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
    """1 if the normalized prediction exactly matches the normalized gold answer, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))
def compute_fa(a_gold, a_pred):
    """Token-level F1 between gold and predicted answers.

    Name kept as `compute_fa` because that is what get_raw_scores calls.
    """
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def get_raw_scores(dataset, preds):
    """Compute per-question exact-match and F1 scores against all gold answers."""
    exact_scores = {}
    fa_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["""id"""]
                gold_answers = [t for t in qa["""answers"""]["""text"""] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""""""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                fa_scores[qid] = max(compute_fa(a, a_pred) for a in gold_answers)
    return exact_scores, fa_scores
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    """Override scores for questions the model deems unanswerable.

    When the no-answer probability exceeds the threshold, the score becomes
    1.0 if the question truly has no answer, else 0.0.
    """
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores
def make_eval_dict(exact_scores, fa_scores, qid_list=None):
    """Aggregate per-question scores into percentage exact/f1 plus a count.

    When `qid_list` is given, only those question ids are averaged.
    """
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("""exact""", 1_0_0.0 * sum(exact_scores.values()) / total),
                ("""f1""", 1_0_0.0 * sum(fa_scores.values()) / total),
                ("""total""", total),
            ] )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("""exact""", 1_0_0.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("""f1""", 1_0_0.0 * sum(fa_scores[k] for k in qid_list) / total),
                ("""total""", total),
            ] )
def merge_eval(main_eval, new_eval, prefix):
    """Copy every entry of `new_eval` into `main_eval` under `prefix`_key."""
    for k in new_eval:
        main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def plot_pr_curve(precisions, recalls, out_image, title):
    """Render a precision-recall step curve to `out_image`.

    NOTE(review): relies on the module-global `plt` imported in the
    __main__ guard (only when --out-image-dir is given).
    """
    plt.step(recalls, precisions, color="""b""", alpha=0.2, where="""post""")
    plt.fill_between(recalls, precisions, step="""post""", alpha=0.2, color="""b""")
    plt.xlabel("""Recall""")
    plt.ylabel("""Precision""")
    plt.xlim([0.0, 1.0_5])
    plt.ylim([0.0, 1.0_5])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    """Compute average precision (x100) sweeping the no-answer threshold.

    Questions are visited in increasing no-answer probability; precision and
    recall are updated at each distinct probability value.
    """
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 1_0_0.0 * avg_prec}
def run_precision_recall_analysis(main_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Produce PR curves (exact, f1, oracle) and merge their AP into main_eval."""
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        # No answerable questions: a PR curve is undefined.
        return
    pr_exact = make_precision_recall_eval(
        exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, """pr_exact.png"""), title="""Precision-Recall curve for Exact Match score""", )
    pr_fa = make_precision_recall_eval(
        fa_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, """pr_f1.png"""), title="""Precision-Recall curve for F1 score""", )
    # Oracle: score 1 exactly when the question truly has an answer.
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, """pr_oracle.png"""), title="""Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)""", )
    merge_eval(main_eval, pr_exact, """pr_exact""")
    merge_eval(main_eval, pr_fa, """pr_f1""")
    merge_eval(main_eval, pr_oracle, """pr_oracle""")
def histogram_na_prob(na_probs, qid_list, image_dir, name):
    """Save a histogram of no-answer probabilities for the given question ids.

    NOTE(review): relies on the module-global `plt` imported in the
    __main__ guard (only when --out-image-dir is given).
    """
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    # Normalize bar heights so they sum to 1 (proportion of dataset).
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("""Model probability of no-answer""")
    plt.ylabel("""Proportion of dataset""")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()
def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    """Find the no-answer threshold maximizing the aggregate score.

    Returns (best score as a percentage of len(scores), best threshold).
    Starts from the score obtained by predicting no-answer everywhere.
    """
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            # Penalize answering an unanswerable question; empty answers are neutral.
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 1_0_0.0 * best_score / len(scores), best_thresh
def find_all_best_thresh(main_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans):
    """Record best achievable exact/F1 and their thresholds in main_eval."""
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_fa, fa_thresh = find_best_thresh(preds, fa_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_fa
    main_eval["best_f1_thresh"] = fa_thresh
def main():
    """Run the full SQuAD 2.0 evaluation driven by the global OPTS namespace."""
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["""data"""]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        # Without model estimates, treat every question as confidently answered.
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, fa_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    fa_thresh = apply_no_ans_threshold(fa_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, fa_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, """HasAns""")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, fa_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, """NoAns""")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, fa_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, fa_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, """hasAns""")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, """noAns""")
    if OPTS.out_file:
        with open(OPTS.out_file, """w""") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))
if __name__ == "__main__":
    # Populate the module-level OPTS that main() and the plotting helpers read.
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        # Use a headless backend before importing pyplot (no display needed).
        matplotlib.use('''Agg''')
        import matplotlib.pyplot as plt
    main()
| 265 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
# Metric metadata strings consumed by the IndicGlue class below; the
# obfuscated version bound all three to one name `a_`, leaving
# _CITATION/_DESCRIPTION/_KWARGS_DESCRIPTION undefined.
_CITATION = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_DESCRIPTION = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_KWARGS_DESCRIPTION = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n    predictions: list of predictions to score (as int64),\n    except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n    references: list of ground truth labels corresponding to the predictions (as int64),\n    except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n    "accuracy": Accuracy\n    "f1": F1 score\n    "precision": Precision@10\nExamples:\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\')  # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n    >>> references = [0, 1]\n    >>> predictions = [0, 1]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'accuracy\': 1.0, \'f1\': 1.0}\n\n    >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n    >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n    >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'precision@10\': 1.0}\n\n'
def simple_accuracy(preds, labels):
    """Fraction of positions where preds equals labels (expects array-likes
    supporting elementwise == and .mean(), e.g. numpy arrays)."""
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return both accuracy and F1 for the given predictions and labels.

    Uses `fa_score` (sklearn's f1_score, imported at the top of the file).
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def precision_at_aa(en_sentvecs, in_sentvecs):
    """Precision@10 for cross-lingual sentence retrieval.

    For each English sentence vector, checks whether its aligned Indic
    vector (same row index) appears among the 10 nearest neighbours by
    cosine distance after mean-centering both sides.
    """
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, '''cosine''')
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :1_0]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class IndicGlue(datasets.Metric):
    """IndicGLUE metric: dispatches to accuracy, accuracy+F1 or precision@10
    depending on the selected task configuration.

    Method names restored to the `datasets.Metric` interface (`_info`,
    `_compute`), which the obfuscated version had renamed.
    """

    def _info(self):
        if self.config_name not in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "cvit-mkb-clsr",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
            "wiki-ner",
        ]:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # cvit-mkb-clsr takes float vectors; every other task takes int labels.
                    '''predictions''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                    '''references''': datasets.Value('''int64''')
                    if self.config_name != '''cvit-mkb-clsr'''
                    else datasets.Sequence(datasets.Value('''float32''')),
                } ),
            codebase_urls=[],
            reference_urls=[],
            format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None,
        )

    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_aa(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in [
            "wnli",
            "copa",
            "sna",
            "csqa",
            "wstp",
            "inltkh",
            "bbca",
            "iitp-mr",
            "iitp-pr",
            "actsa-sc",
            "md",
        ]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
                '''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
                '''"wiki-ner"]''' )
| 35 |
import math
def fx(x: float, a: float) -> float:
    """f(x) = x^2 - a; its root is sqrt(a)."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point >= sqrt(a) by repeated squaring from 2.0."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) via Newton-Raphson iteration on f(x) = x^2 - a.

    Raises ValueError for negative input. Restored names: all four
    functions shared the name SCREAMING_SNAKE_CASE while the internal
    calls used fx/fx_derivative/get_initial_point.
    """
    if a < 0:
        raise ValueError('math domain error')

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 562 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
# Shared RNG used when a caller does not supply its own.
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a shape[0] x shape[1] nested list of random floats in [0, scale).

    `name` is accepted for API compatibility but unused.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    """Helper that produces configs and inputs for TvltFeatureExtractor tests.

    Restored name: the test class below instantiates
    `TvltFeatureExtractionTester(self)`, but this class had been renamed
    to `A` and its parameters collapsed to a single duplicated name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=4_0_0,
        max_seq_length=2_0_0_0,
        spectrogram_length=2_0_4_8,
        feature_size=1_2_8,
        num_audio_channels=1,
        hop_length=5_1_2,
        chunk_length=3_0,
        sampling_rate=4_4_1_0_0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive example lengths so the batch spans [min, max).
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        """Keyword arguments for constructing a TvltFeatureExtractor."""
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Build a batch of dummy speech inputs (optionally equal-length / numpy)."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class A(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Unit tests for TvltFeatureExtractor.

    Fixes: the base class name was mangled to an undefined symbol — restored
    to the imported ``SequenceFeatureExtractionTestMixin``; every test method
    shared one mangled name so all but the last were shadowed and never ran —
    names restored to unittest's ``test_*`` convention; the many unbound
    placeholder references are mapped to the locals each statement obviously
    operates on.
    """

    # The mixin reads this attribute to know which class it is testing.
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        # The mixin builds `self.feat_extract_dict` from this tester instance.
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, 'spectrogram_length'))
        self.assertTrue(hasattr(feature_extractor, 'feature_size'))
        self.assertTrue(hasattr(feature_extractor, 'num_audio_channels'))
        self.assertTrue(hasattr(feature_extractor, 'hop_length'))
        self.assertTrue(hasattr(feature_extractor, 'chunk_length'))
        self.assertTrue(hasattr(feature_extractor, 'sampling_rate'))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            # The mel filter banks are float arrays: compare numerically,
            # then compare the remaining JSON-serializable fields exactly.
            mel_first = dict_first.pop('mel_filters')
            mel_second = dict_second.pop('mel_filters')
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
            dict_first = feat_extract_first.to_dict()
            dict_second = feat_extract_second.to_dict()
            mel_first = dict_first.pop('mel_filters')
            mel_second = dict_second.pop('mel_filters')
            self.assertTrue(np.allclose(mel_first, mel_second))
            self.assertEqual(dict_first, dict_second)

    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [_snake_case((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking (mask_audio flag restored from the comment; the
        # mangled original passed an undefined name here — confirm True is the
        # intended value)
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors='np', sampling_rate=44100, mask_audio=True).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [_snake_case((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors='np', sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        """Load `num_samples` decoded audio arrays from the dummy LibriSpeech set."""
        # Name restored: test_integration below calls `self._load_datasamples`.
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors='pt').audio_values
        # assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 715 |
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    """Return True if num/den is a "digit-cancelling" fraction.

    That is: the fraction is unchanged when the trailing digit of the
    numerator (equal to the leading digit of the denominator) is cancelled,
    e.g. 49/98 -> 4/8.  Renamed from a mangled name to match the call at the
    `fraction_list` site.  Also guards against a zero trailing denominator
    digit, which previously raised ZeroDivisionError for inputs like (12, 20).
    """
    if den % 10 == 0:
        return False
    return num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
def fraction_list(digit_len: int) -> list[str]:
    """Return all digit-cancelling fractions "num/den" with `digit_len`-digit parts.

    Renamed from a mangled name to match the call in `solution`; the loop
    bounds are reconstructed from the locals the body reads (`den`,
    `last_digit`).
    """
    solutions = []
    den = 11
    last_digit = int('1' + '0' * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f'{num}/{den}')
            den += 1
        # reset the denominator scan for the next numerator (the original
        # also incremented `num` here, which the for-loop rebinding made a
        # no-op, so it is dropped)
        den = 10
    return solutions
def solution(max_digit: int = 2) -> int:
    """Return the denominator of the product of all digit-cancelling fractions,
    in lowest terms (Project Euler 33 style).

    Renamed from a mangled name to match the `print(solution())` call in the
    module's __main__ guard.
    """
    result = 1.0
    for fraction in fraction_list(max_digit):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
    # Print the result for the default two-digit case.
    # (Fix: the call was not indented under the guard.)
    print(solution())
| 586 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Type variables for the skip list's key and value types.
# Fix: both were assigned to one mangled name, while the classes below
# subscript Generic[KT, VT].
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    """A single skip-list node holding a key/value pair and its forward links.

    Class name restored: SkipList below constructs ``Node[KT, VT]()`` and
    ``Node(key, value)``.
    """

    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i.
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node participates in.

        Property name restored: SkipList reads ``node.level`` throughout.
        """
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    """Probabilistic ordered key/value map with expected O(log n) operations.

    Class and method names restored from mangled duplicates so the call
    sites in this file (`insert`, `find`, `delete`, and the internal
    `_locate_node` / `random_level`) resolve again.
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()  # sentinel, holds no data
        self.level = 0  # current height of the list
        self.p = p  # probability of promoting a node one level up
        self.max_level = max_level

    def __str__(self) -> str:
        """ASCII diagram of the list: one row per node plus its level columns."""
        items = list(self)
        if len(items) == 0:
            return f"SkipList(level={self.level})"
        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))
        while len(node.forward) != 0:
            node = node.forward[0]
            lines.append(
                f"[{node.key}]".ljust(label_size, '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards))
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward
        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking the level-0 links."""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level in [1, max_level]; each promotion happens with prob. p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node with `key` or None, per-level predecessors of `key`)."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        """Remove `key` (and its value) if present; no-op otherwise."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        """Insert `key` -> `value`, overwriting the value if `key` exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: KT):
        """Return the value stored under `key`, or None if absent."""
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert() -> None:
    """All inserted keys must be reachable by walking the level-0 links."""
    skip_list = SkipList()
    skip_list.insert('Key1', 3)
    skip_list.insert('Key2', 12)
    skip_list.insert('Key3', 41)
    skip_list.insert('Key4', -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19
def test_insert_overrides_existing_value() -> None:
    """Re-inserting an existing key must replace its value, not add a node."""
    skip_list = SkipList()
    skip_list.insert('Key1', 10)
    skip_list.insert('Key1', 12)

    skip_list.insert('Key5', 7)
    skip_list.insert('Key7', 10)
    skip_list.insert('Key10', 5)

    skip_list.insert('Key7', 7)
    skip_list.insert('Key5', 5)
    skip_list.insert('Key10', 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10
def test_searching_empty_list_returns_none() -> None:
    """find() on an empty list returns None rather than raising."""
    skip_list = SkipList()
    assert skip_list.find('Some key') is None
def test_search() -> None:
    """find() returns the latest value for present keys and None otherwise."""
    skip_list = SkipList()

    skip_list.insert('Key2', 20)
    assert skip_list.find('Key2') == 20

    skip_list.insert('Some Key', 10)
    skip_list.insert('Key2', 8)
    skip_list.insert('V', 13)

    assert skip_list.find('Y') is None
    assert skip_list.find('Key2') == 8
    assert skip_list.find('Some Key') == 10
    assert skip_list.find('V') == 13
def test_deleting_item_from_empty_list_do_nothing() -> None:
    """delete() on an empty list is a no-op."""
    skip_list = SkipList()
    skip_list.delete('Some key')

    assert len(skip_list.head.forward) == 0
def test_deleted_items_are_not_founded_by_find_method() -> None:
    """Deleted keys must no longer be found."""
    skip_list = SkipList()

    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)

    skip_list.delete('V')
    skip_list.delete('Key2')

    assert skip_list.find('V') is None
    assert skip_list.find('Key2') is None
def test_delete_removes_only_given_key() -> None:
    """Each delete() removes exactly the requested key, leaving the rest."""
    skip_list = SkipList()

    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 14)
    skip_list.insert('Key2', 15)

    skip_list.delete('V')
    assert skip_list.find('V') is None
    assert skip_list.find('X') == 14
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15

    skip_list.delete('X')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') == 12
    assert skip_list.find('Key2') == 15

    skip_list.delete('Key1')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') == 15

    skip_list.delete('Key2')
    assert skip_list.find('V') is None
    assert skip_list.find('X') is None
    assert skip_list.find('Key1') is None
    assert skip_list.find('Key2') is None
def test_delete_doesnt_leave_dead_nodes() -> None:
    """After a delete, no forward link anywhere may still reach the dead node."""
    skip_list = SkipList()

    skip_list.insert('Key1', 12)
    skip_list.insert('V', 13)
    skip_list.insert('X', 142)
    skip_list.insert('Key2', 15)

    skip_list.delete('X')

    def traverse_keys(node):
        # Fix: the recursive call's argument was mangled; it must recurse on
        # each forward link.
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    # head + the three surviving keys
    assert len(set(traverse_keys(skip_list.head))) == 4
def test_iter_always_yields_sorted_values() -> None:
    """Iteration must yield keys in ascending order after inserts and deletes."""

    def is_sorted(lst):
        # Fix: the zip argument was mangled; compare adjacent pairs of `lst`.
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))

    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))

    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))
def pytests() -> None:
    """Run every unit test above repeatedly.

    NOTE(review): the mangled original reused one name for every function in
    this file; `pytests` is chosen so this runner no longer shadows `main`.
    """
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()
def main() -> None:
    """Small demo: build a list, overwrite and delete keys, print the diagram.

    Renamed to `main` to match the call in the __main__ guard below.
    """
    skip_list = SkipList()
    skip_list.insert(2, '2')
    skip_list.insert(4, '4')
    skip_list.insert(6, '4')
    skip_list.insert(4, '5')
    skip_list.insert(8, '4')
    skip_list.insert(9, '4')

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
    # Run doctests first, then the interactive demo.
    # (Fix: the statements below were not indented under the guard.)
    import doctest

    doctest.testmod()
    main()
| 93 |
"""simple docstring"""
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
# Module-level logger for this training script (read as `logger` throughout
# `main`; the mangled original bound all three values to one name).
logger = logging.getLogger(__name__)

# Config classes that provide an LM head, and their model-type strings
# (the latter is used to document --model_type choices).
MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """Arguments for which model/config/tokenizer to fine-tune or train from scratch.

    Fixes: class name restored (HfArgumentParser in `main` parses
    `ModelArguments`); every field had the same mangled name, so only one
    survived — field names are reconstructed from the `model_args.*` reads in
    `main`; the undefined `default=a` placeholders become `default=None`.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """Arguments for the training/eval data.

    Fixes: class name restored (parsed by HfArgumentParser in `main`);
    duplicated mangled field names reconstructed from the `data_args.*` /
    `args.*` reads in `main` and `get_dataset`; undefined `default=a`
    placeholders become None (str fields) or False (bool fields).
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(args, tokenizer, evaluate=False, cache_dir=None):
    """Build the train or eval dataset described by `args`.

    args: DataTrainingArguments; tokenizer: PreTrainedTokenizer.
    Renamed from a mangled name to match the two call sites in `main`.
    """

    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError('You need to set world whole masking and mlm to True for Chinese Whole Word Mask')
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size,
                overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        # Fix: the comprehension passed an undefined placeholder instead of `f`.
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
    """Train and/or evaluate a language model (HF run_language_modeling flow).

    Renamed from a mangled name to match the __main__ guard; the many unbound
    placeholder references are mapped to the locals each statement obviously
    operates on.  Returns the dict of eval results (possibly empty).
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            'Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file '
            'or remove the --do_eval argument.' )
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            ' --overwrite_output_dir to overcome.' )

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s',
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,  # fix: attribute was digit-mangled to `fpaa`
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info('Training/evaluation parameters %s', training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            'You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another'
            ' script, save it,and load it from here, using --tokenizer_name' )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            'BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the'
            '--mlm flag (masked language modeling).' )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,  # NOTE(review): placeholder was unbound; True matches the LM script — confirm
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('*** Evaluate ***')

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output['eval_loss'])
        result = {'perplexity': perplexity}

        output_eval_file = os.path.join(training_args.output_dir, 'eval_results_lm.txt')
        if trainer.is_world_master():
            with open(output_eval_file, 'w') as writer:
                logger.info('***** Eval results *****')
                for key in sorted(result.keys()):
                    logger.info(' %s = %s', key, str(result[key]))
                    writer.write('%s = %s\n' % (key, str(result[key])))

        results.update(result)

    return results
def _mp_fn(index):
    """Entry point for TPU multiprocessing (xla_spawn); `index` is unused.

    NOTE(review): the mangled original reused the same name as `main`, which
    would have shadowed it; `_mp_fn` is the conventional xla_spawn hook name —
    confirm against the launcher this script is used with.
    """
    main()
if __name__ == "__main__":
    # Fix: the call was not indented under the guard.
    main()
| 93 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the mobilenet_v2 subpackage.
# Fixes: every assignment here was mangled to one variable name while the
# _LazyModule call below reads `_import_structure`; the optional blocks must
# add keys to that dict, not rebind it; the TYPE_CHECKING imports are aligned
# with the MobileNetV2* names this dict declares (they had been mangled to
# `*_va` / `MobileNetVa*`); `sys.modules[__name__]` is restored as the target
# of the _LazyModule assignment (the `import sys` only makes sense for it).
_import_structure = {
    "configuration_mobilenet_v2": [
        "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileNetV2Config",
        "MobileNetV2OnnxConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_mobilenet_v2"] = ["MobileNetV2FeatureExtractor"]
    _import_structure["image_processing_mobilenet_v2"] = ["MobileNetV2ImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilenet_v2"] = [
        "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileNetV2ForImageClassification",
        "MobileNetV2ForSemanticSegmentation",
        "MobileNetV2Model",
        "MobileNetV2PreTrainedModel",
        "load_tf_weights_in_mobilenet_v2",
    ]

if TYPE_CHECKING:
    from .configuration_mobilenet_v2 import (
        MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileNetV2Config,
        MobileNetV2OnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
        from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilenet_v2 import (
            MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileNetV2ForImageClassification,
            MobileNetV2ForSemanticSegmentation,
            MobileNetV2Model,
            MobileNetV2PreTrainedModel,
            load_tf_weights_in_mobilenet_v2,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 703 |
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowercase(idx):
    """Return (HF name, original name) rename pairs for stage `idx`'s patch embedding.

    Fix: the parameter was mangled to a name the body never used; it is
    restored to `idx`, which every f-string below reads (positional callers
    are unaffected).
    """
    embed = []
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
            f"stage{idx}.patch_embed.proj.weight",
        ) )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
            f"stage{idx}.patch_embed.proj.bias",
        ) )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
            f"stage{idx}.patch_embed.norm.weight",
        ) )
    embed.append(
        (
            f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
            f"stage{idx}.patch_embed.norm.bias",
        ) )
    return embed
def lowercase(idx: int, cnt: int) -> list:
    """Return (HuggingFace key, original key) rename pairs for attention block `cnt` of stage `idx`.

    Fixes: the original signature declared the same parameter name twice (a
    SyntaxError) while the body used `idx`/`cnt`, and the result list was bound
    to a lost name. The 34 hand-written appends are generated by loops; the
    output order is identical to the original enumeration.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"
    # Convolutional q/k/v projections: one conv weight plus five batch-norm tensors each.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection"
        conv_orig = f"{orig_prefix}.attn.conv_proj_{short}"
        attention_weights.append((f"{conv_hf}.convolution.weight", f"{conv_orig}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{conv_hf}.normalization.{stat}", f"{conv_orig}.bn.{stat}"))
    # Linear q/k/v projections (weight then bias, in q/k/v order).
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{param}",
                    f"{orig_prefix}.attn.proj_{short}.{param}",
                )
            )
    # Attention output projection, the two MLP layers, and the two layer norms.
    for hf_name, orig_name in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_name}.{param}", f"{orig_prefix}.{orig_name}.{param}"))
    return attention_weights
def lowercase(__A: int) -> list:
    """Return the single rename pair for the classification token of stage `__A`.

    Fixes: the list was bound to a lost name while the append targeted `token`,
    and the body used an undefined `idx`.
    """
    idx = __A
    # NOTE(review): the original-checkpoint key is hard-coded to "stage2" — the
    # cls token only exists in the last stage of CvT; confirm against the caller.
    token = [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]
    return token
def lowercase() -> list:
    """Return rename pairs for the final layer norm and the classification head.

    Fixes: the `-> Tuple` annotation referenced a name this module never imports
    (evaluated at def time, so it raised NameError), and the result list was
    bound to a lost name while the appends targeted `head`.
    """
    head = [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]
    return head
def lowercase(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into a HuggingFace CvtForImageClassification.

    Fixes: the signature declared the same parameter name four times (a
    SyntaxError) while the body needed the four distinct arguments, and most
    local assignments had lost their targets (`parser`-style degradation).

    Args:
        cvt_model: checkpoint name, e.g. "cvt-13"/"cvt-21"/"cvt-w24" (depth is parsed from it).
        image_size: shortest-edge size to store in the image processor.
        cvt_file_name: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output directory for model + processor.
    """
    # Build the ImageNet-1k label mapping from the HF hub.
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels, id2label=idalabel, label2id=labelaid)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    # NOTE(review): cls_token/embeddings/attention/final are the rename helpers
    # defined above in this file (all currently named `lowercase`); confirm the
    # intended names before running.
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    # Re-key the original weights into the HF naming scheme and save everything.
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # Fixes: the parser and parsed args were bound to a lost name (so
    # `parser.add_argument` raised NameError), the `List[Any]` annotation was
    # undefined at module level, the `--cvt_file_name` help text was a
    # copy-paste of the image-size help, and the entry point name was wrong.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Path to the original CvT checkpoint file (.pth).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    # `lowercase` is the checkpoint-conversion entry point defined above.
    lowercase(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 315 | 0 |
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def a(arr: Sequence[float], low: int, high: int):
    """Divide-and-conquer maximum subarray of ``arr[low..high]``.

    Returns ``(start, end, total)`` for a maximum-sum contiguous subarray,
    or ``(None, None, 0)`` for an empty input.

    Fixes: the original signature declared ``A__`` three times (a SyntaxError)
    and the body referenced an undefined ``lowerCamelCase__``; the cross-sum
    step is now a nested helper so the function is self-contained.
    """

    def _cross_sum(lo: int, mid: int, hi: int):
        # Best sum of any subarray that crosses the midpoint: grow leftwards
        # from `mid`, then rightwards from `mid + 1`, keeping the best of each.
        left_sum, max_left = float("-inf"), -1
        summ = 0
        for i in range(mid, lo - 1, -1):
            summ += arr[i]
            if summ > left_sum:
                left_sum, max_left = summ, i
        right_sum, max_right = float("-inf"), -1
        summ = 0
        for i in range(mid + 1, hi + 1):
            summ += arr[i]
            if summ > right_sum:
                right_sum, max_right = summ, i
        return max_left, max_right, left_sum + right_sum

    if not arr:
        return None, None, 0
    if low == high:
        return low, high, arr[low]
    mid = (low + high) // 2
    left_low, left_high, left_sum = a(arr, low, mid)
    right_low, right_high, right_sum = a(arr, mid + 1, high)
    cross_left, cross_right, cross_sum = _cross_sum(low, mid, high)
    if left_sum >= right_sum and left_sum >= cross_sum:
        return left_low, left_high, left_sum
    elif right_sum >= left_sum and right_sum >= cross_sum:
        return right_low, right_high, right_sum
    return cross_left, cross_right, cross_sum
def a(arr: Sequence[float], low: int, mid: int, high: int):
    """Best subarray of ``arr[low..high]`` that crosses index ``mid``.

    Returns ``(max_left, max_right, left_sum + right_sum)``.

    Fixes: the original signature declared ``A__`` four times (a SyntaxError)
    and the accumulator tuples had lost their names (``left_sum``/``max_left``
    etc. were all assigned to ``_lowercase``).
    """
    # Grow leftwards from `mid`, tracking the best prefix ending at `mid`.
    left_sum, max_left = float("-inf"), -1
    summ = 0
    for i in range(mid, low - 1, -1):
        summ += arr[i]
        if summ > left_sum:
            left_sum = summ
            max_left = i
    # Grow rightwards from `mid + 1`, tracking the best suffix starting there.
    right_sum, max_right = float("-inf"), -1
    summ = 0
    for i in range(mid + 1, high + 1):
        summ += arr[i]
        if summ > right_sum:
            right_sum = summ
            max_right = i
    return max_left, max_right, (left_sum + right_sum)
def a(A__: int) -> float:
    """Time one max-subarray run over ``A__`` random positive elements; return elapsed seconds.

    Fixes: the array comprehension referenced an undefined ``lowerCamelCase__``
    instead of the input size.
    """
    input_size = A__
    arr = [randint(1, input_size) for _ in range(input_size)]
    start = time.time()
    # NOTE(review): `max_subarray` is the divide-and-conquer routine defined
    # above (currently named `a` in this file) — confirm the intended name.
    max_subarray(arr, 0, input_size - 1)
    end = time.time()
    return end - start
def a() -> None:
    """Benchmark max-subarray over growing input sizes and plot runtime vs size.

    Fixes: the input-size list and runtimes were bound to lost names and the
    print/zip/plot calls referenced an undefined ``lowerCamelCase__``.
    """
    input_sizes = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
    # NOTE(review): `time_max_subarray` is the timing helper defined above
    # (currently named `a` in this file) — confirm the intended name.
    runtimes = [time_max_subarray(input_size) for input_size in input_sizes]
    print('No of Inputs\t\tTime Taken')
    for input_size, runtime in zip(input_sizes, runtimes):
        print(input_size, '\t\t', runtime)
    plt.plot(input_sizes, runtimes)
    plt.xlabel('Number of Inputs')
    plt.ylabel('Time taken in seconds')
    plt.show()
# Run this module's doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
| 291 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Nightly integration test for the legacy ONNX Stable Diffusion inpaint pipeline.

    Fixes: all three methods were named ``a__`` (the later definitions shadowed
    the two properties, so ``self.gpu_provider``/``self.gpu_options`` could not
    resolve), locals inside the test were all assigned to ``A``, and the
    ``Optional[Any]`` annotations referenced names this module never imports.
    """

    @property
    def gpu_provider(self):
        # ONNX Runtime CUDA execution provider with a fixed 15 GB arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    # NOTE(review): the original test-method name was lost; any `test_*` name
    # restores unittest discovery — confirm against upstream.
    def test_inference_default_pndm(self):
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo.png""" )
        mask_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy""" )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            """CompVis/stable-diffusion-v1-4""" ,
            revision="""onnx""" ,
            safety_checker=None ,
            feature_extractor=None ,
            provider=self.gpu_provider ,
            sess_options=self.gpu_options ,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = """A red cat sitting on a park bench"""
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt ,
            image=init_image ,
            mask_image=mask_image ,
            strength=0.75 ,
            guidance_scale=7.5 ,
            num_inference_steps=15 ,
            generator=generator ,
            output_type="""np""" ,
        )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 1e-2
| 135 | 0 |
'''simple docstring'''
def A_(SCREAMING_SNAKE_CASE_=10**12) -> int:
    """Return the smallest blue-disc count B of an arrangement with more than
    ``SCREAMING_SNAKE_CASE_`` discs total where drawing two blue discs has
    probability exactly 1/2 (Pell-style recurrence; e.g. 15 of 21, 85 of 120).

    Fixes: all four state variables were assigned to the same lost name
    ``lowercase_``, so the loop body raised NameError; the __main__ guard
    called an undefined ``solution``.
    """
    min_total = SCREAMING_SNAKE_CASE_
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    # numerator/denominator track successive solutions of the underlying
    # Pell equation; stop at the first arrangement exceeding min_total discs.
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{A_() = }")
| 717 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the Data2Vec family: maps submodule name -> list of
# public names exported from it (consumed by _LazyModule at the bottom of the file).
__snake_case = {
    """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
    """configuration_data2vec_text""": [
        """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Data2VecTextConfig""",
        """Data2VecTextOnnxConfig""",
    ],
    """configuration_data2vec_vision""": [
        """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """Data2VecVisionConfig""",
        """Data2VecVisionOnnxConfig""",
    ],
}
# Torch-backed model classes are only registered when torch is importable;
# a missing optional dependency is silently tolerated.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): these lists are rebound to the same `__snake_case` name as the
    # import structure above instead of being inserted into it — presumably a
    # mechanical rename of `_import_structure["..."] = [...]`; verify upstream.
    __snake_case = [
        """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecAudioForAudioFrameClassification""",
        """Data2VecAudioForCTC""",
        """Data2VecAudioForSequenceClassification""",
        """Data2VecAudioForXVector""",
        """Data2VecAudioModel""",
        """Data2VecAudioPreTrainedModel""",
    ]
    __snake_case = [
        """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecTextForCausalLM""",
        """Data2VecTextForMaskedLM""",
        """Data2VecTextForMultipleChoice""",
        """Data2VecTextForQuestionAnswering""",
        """Data2VecTextForSequenceClassification""",
        """Data2VecTextForTokenClassification""",
        """Data2VecTextModel""",
        """Data2VecTextPreTrainedModel""",
    ]
    __snake_case = [
        """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Data2VecVisionForImageClassification""",
        """Data2VecVisionForMaskedImageModeling""",
        """Data2VecVisionForSemanticSegmentation""",
        """Data2VecVisionModel""",
        """Data2VecVisionPreTrainedModel""",
    ]

# TensorFlow vision classes follow the same pattern (no exception guard here).
if is_tf_available():
    __snake_case = [
        """TFData2VecVisionForImageClassification""",
        """TFData2VecVisionForSemanticSegmentation""",
        """TFData2VecVisionModel""",
        """TFData2VecVisionPreTrainedModel""",
    ]
# Static type-checkers import the concrete symbols eagerly; at runtime the
# module is swapped for a lazy proxy (see the final assignment below).
if TYPE_CHECKING:
    from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
    from .configuration_dataavec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecTextConfig,
        DataaVecTextOnnxConfig,
    )
    from .configuration_dataavec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DataaVecVisionConfig,
        DataaVecVisionOnnxConfig,
    )
    try:
        # Torch-only modeling imports; skipped silently when torch is absent.
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dataavec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecAudioForAudioFrameClassification,
            DataaVecAudioForCTC,
            DataaVecAudioForSequenceClassification,
            DataaVecAudioForXVector,
            DataaVecAudioModel,
            DataaVecAudioPreTrainedModel,
        )
        from .modeling_dataavec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecTextForCausalLM,
            DataaVecTextForMaskedLM,
            DataaVecTextForMultipleChoice,
            DataaVecTextForQuestionAnswering,
            DataaVecTextForSequenceClassification,
            DataaVecTextForTokenClassification,
            DataaVecTextModel,
            DataaVecTextPreTrainedModel,
        )
        from .modeling_dataavec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            DataaVecVisionForImageClassification,
            DataaVecVisionForMaskedImageModeling,
            DataaVecVisionForSemanticSegmentation,
            DataaVecVisionModel,
            DataaVecVisionPreTrainedModel,
        )
    # TensorFlow vision classes (no optional-dependency guard in this branch).
    if is_tf_available():
        from .modeling_tf_dataavec_vision import (
            TFDataaVecVisionForImageClassification,
            TFDataaVecVisionForSemanticSegmentation,
            TFDataaVecVisionModel,
            TFDataaVecVisionPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy proxy so heavy backends load on demand.
    __snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 603 | 0 |
def one_pence() -> int:
    """A remaining amount can always be completed with 1p coins in exactly one way."""
    return 1


def two_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 2p."""
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 5p."""
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 10p."""
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 20p."""
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    """Ways to make `x` pence using coins of at most 50p."""
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    """Ways to make `x` pence using coins of at most 100p."""
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    """Ways to make `x` pence using coins of at most 200p."""
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def lowerCAmelCase__(lowerCamelCase_: int = 200) -> int:
    """Count the ways to make `lowerCamelCase_` pence from UK coins
    {1, 2, 5, 10, 20, 50, 100, 200}p (Project Euler 31 for the default 200).

    Fixes: all nine functions in this block were named ``lowerCAmelCase__`` and
    shadowed one another while their bodies called the original helper names
    (``one_pence`` … ``two_pound``), so every call raised NameError. The helper
    chain is restored; the public entry point keeps the module's final binding.
    """
    return two_pound(lowerCamelCase_)


if __name__ == "__main__":
    print(lowerCAmelCase__(int(input().strip())))
| 647 |
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
from transformers import AutoTokenizer, FlaxMTaForConditionalGeneration
from transformers.models.ta.modeling_flax_ta import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase__ (unittest.TestCase):
    """Integration test comparing Flax MT5's loss against a Mesh-TF reference value.

    Fixes: the test method's name was degraded (so unittest discovery skipped
    it) and every local was bound via annotated throw-away names.
    """

    @slow
    # NOTE(review): the original method name was lost; any `test_*` name
    # restores discovery — confirm against upstream.
    def test_small_integration_test(self):
        model = FlaxMTaForConditionalGeneration.from_pretrained('''google/mt5-small''')
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''')
        input_ids = tokenizer('''Hello there''', return_tensors='''np''').input_ids
        labels = tokenizer('''Hi I am''', return_tensors='''np''').input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        # Mesh-TF reports the summed (not mean) token loss, hence the rescale.
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
| 647 | 1 |
from __future__ import annotations
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )-> List[Any]:
"""simple docstring"""
snake_case_ = list(range(len(UpperCAmelCase__ ) ) )
snake_case_ = [v / w for v, w in zip(UpperCAmelCase__ , UpperCAmelCase__ )]
index.sort(key=lambda SCREAMING_SNAKE_CASE : ratio[i] , reverse=UpperCAmelCase__ )
snake_case_ = 0
snake_case_ = [0] * len(UpperCAmelCase__ )
for i in index:
if weight[i] <= capacity:
snake_case_ = 1
max_value += value[i]
capacity -= weight[i]
else:
snake_case_ = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
# Run embedded doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 710 |
def __lowerCAmelCase (SCREAMING_SNAKE_CASE )-> int:
"""simple docstring"""
assert column_title.isupper()
snake_case_ = 0
snake_case_ = len(SCREAMING_SNAKE_CASE ) - 1
snake_case_ = 0
while index >= 0:
snake_case_ = (ord(column_title[index] ) - 64) * pow(26 , SCREAMING_SNAKE_CASE )
answer += value
power += 1
index -= 1
return answer
# Run this module's doctests when executed as a script.
# Fix: stray table-marker garbage ("| 531 | 0 |") was fused onto the final
# line, which made the file unparseable.
if __name__ == "__main__":
    from doctest import testmod
    testmod()
"""simple docstring"""
def lowerCamelCase__(head):
    """Return True if the singly linked list starting at ``head`` is a palindrome.

    O(1) extra space: split at the middle, reverse the second half in place,
    then compare the two halves node by node.

    Fixes: the fast/slow/second/node locals had all been collapsed into lost
    names, so the body's references raised NameError.
    """
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True
def lowerCamelCase__(head):
    """Return True if the singly linked list starting at ``head`` is a palindrome.

    O(n) extra space: push the second half's values onto a stack, then pop
    while walking from the head.

    Fixes: ``slow``/``fast``/``cur``/``stack`` had all been collapsed into lost
    names, so the body's references raised NameError.
    """
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def lowerCamelCase__(head):
    """Return True if the singly linked list starting at ``head`` is a palindrome.

    Records the positions of every value in a dict, then checks that paired
    positions of each value mirror around the middle. At most one value may
    appear an odd number of times (the center of an odd-length palindrome).

    Fixes: ``d``/``pos``/``checksum``/``middle``/``step`` had all been collapsed
    into lost names, so the body's references raised NameError.
    """
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1  # mirrored positions i and j satisfy i + j == checksum
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
    # More than one odd-count value cannot form a palindrome.
    if middle > 1:
        return False
    return True
| 259 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
# Module-level logger shared by the TF helper functions below.
_lowerCAmelCase = logging.get_logger(__name__)
def lowerCamelCase__(tensor):
    """Return the shape of ``tensor`` as a list, using static dims where known
    and dynamic ``tf.shape`` entries where a dim is None.

    Fixes: the parameter name and the body's ``tensor`` references disagreed,
    and the fully-unknown-shape check passed the tensor itself to
    ``tf.TensorShape`` instead of ``None``.
    """
    # NumPy arrays always have a fully static shape.
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    # Completely unknown static shape: fall back to the dynamic shape tensor.
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def lowerCamelCase__(logits, axis=None, name=None):
    """Numerically stabilized softmax: adds a tiny epsilon to the logits before
    ``tf.nn.softmax`` (works around an XLA edge case in upstream TF).

    Fixes: the original signature declared the same parameter name three times
    (a SyntaxError) while the body needed ``logits``/``axis``/``name``.
    """
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def lowerCamelCase__(inputs, weight, bias, epsilon=1e-5, axis=-1):
    """Layer normalization over a single ``axis`` with 1-D ``weight``/``bias``,
    implemented via ``tf.nn.batch_normalization``.

    Fixes: the original signature declared the same parameter name five times
    (a SyntaxError) while the body needed inputs/weight/bias/epsilon/axis.
    """
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.')
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon,
    )
    return outputs
def lowerCamelCase__(input, start_dim=0, end_dim=-1):
    """Flatten dims ``start_dim..end_dim`` (inclusive) of ``input`` into one,
    mirroring ``torch.flatten`` semantics.

    Fixes: the original signature declared the same parameter name three times
    (a SyntaxError) while the body needed input/start_dim/end_dim.
    """
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def lowerCamelCase__(encoder_attention_mask):
    """Expand a 2-D/3-D encoder attention mask to 4-D and convert it to an
    additive mask (0 where attended, dtype-min where masked).

    Fixes: the degraded parameter/locals made every reference to
    ``encoder_attention_mask``/``encoder_extended_attention_mask`` a NameError.
    """
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def lowerCamelCase__(tensor, embed_dim, tensor_name="input_ids"):
    """Assert every id in ``tensor`` is < ``embed_dim`` (graph-compatible check),
    raising with a tokenization-oriented message otherwise.

    Fixes: the original signature declared the same parameter name three times
    (a SyntaxError) while the body needed tensor/embed_dim/tensor_name.
    """
    tf.debugging.assert_less(
        tensor,
        tf.cast(embed_dim, dtype=tensor.dtype),
        message=(
            f"""The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding """
            f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
        ),
    )
def lowerCamelCase__(group, name, data):
    """Save ``data`` as attribute ``name`` on an HDF5 ``group``, chunking it
    when the serialized form would exceed the HDF5 object-header limit.

    Fixes: the attrs assignments had lost their targets (the chunked and plain
    writes were bound to throw-away names instead of ``group.attrs[...]``).
    """
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            'The following attributes cannot be saved to HDF5 file because '
            f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
            f"""bytes: {bad_attributes}""")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        # Store each chunk under "<name><chunk_id>".
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def lowerCamelCase__(group, name):
    """Load attribute ``name`` from an HDF5 ``group``, reassembling chunked
    attributes stored as "<name>0", "<name>1", ... Bytes entries are decoded
    to str.

    Fixes: ``data``/``chunk_id`` had lost their names, so the body raised
    NameError on first use.
    """
    if name in group.attrs:
        data = [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs[name]]
    else:
        data = []
    # Collect any chunked continuation attributes.
    chunk_id = 0
    while "%s%d" % (name, chunk_id) in group.attrs:
        data.extend(
            [n.decode('utf8') if hasattr(n, 'decode') else n for n in group.attrs['%s%d' % (name, chunk_id)]])
        chunk_id += 1
    return data
def lowerCamelCase__(data):
    """Expand every rank-1 ``tf.Tensor`` in a (possibly nested) structure to
    rank 2 by appending a trailing axis; other leaves pass through unchanged.

    Fixes: the inner predicate tested the outer degraded parameter instead of
    the per-leaf value ``t``.
    """
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
| 259 | 1 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class snake_case_ (unittest.TestCase ):
    """Builds a small DistilBert config plus random inputs for the Flax model tests.

    Fixes: ``__init__`` declared ``__snake_case`` for every parameter (a
    SyntaxError) and assigned each argument to a throw-away ``a__`` instead of
    ``self``; both helper methods shared one degraded name, so the second
    shadowed the first and ``self.prepare_config_and_inputs()`` was unresolvable.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/mask plus a matching DistilBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same as above but with the inputs packed into a dict (mixin contract)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(lowerCamelCase_, unittest.TestCase):
    """Model-class test suite for the Flax DistilBERT heads.

    Fixes vs. the garbled original: the attribute is named ``all_model_classes``
    (read by the tester mixin), the duplicated ``FlaxDistilBertForQuestionAnswering``
    entry is removed, the fixture method is named ``setUp`` so unittest calls it
    and stores the tester on ``self``, and the slow test carries a ``test_`` prefix
    so it is discovered.
    """

    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Smoke test: each head loads from the hub and runs a 1x1 forward pass.
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking real hub weights against reference activations."""

    @slow
    def test_inference_no_head(self):
        # ``test_`` prefix restored so unittest actually discovers this method.
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        # Reference slice recorded from the PyTorch implementation.
        expected_slice = np.array(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 711 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Return pi to ``precision`` significant digits via the Chudnovsky algorithm.

    The module guard below calls ``pi(n)``; the function is named accordingly
    (the garbled original bound it to ``__lowercase`` and never named its
    parameter, so the body referenced an undefined ``precision``).

    :raises TypeError: if ``precision`` is not an int.
    :raises ValueError: if ``precision`` < 1.
    """
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    # Decimal arithmetic at the requested number of significant digits.
    getcontext().prec = precision
    # Each Chudnovsky term contributes ~14 digits of pi.
    num_iterations = ceil(precision / 14)

    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    # Drop the last (rounded) digit so every returned digit is exact.
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# The garbled original rebound a single name four times, shadowing every value;
# the class below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, so the constants are restored here.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

# Maximum input length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """Fast GPT-NeoX tokenizer (byte-level BPE), backed by `tokenizers`.

    Restored from the garbled original: the base class is the imported
    ``PreTrainedTokenizerFast`` (the old base ``__A`` was undefined), the
    duplicated ``snake_case_`` parameter names are de-duplicated, and the
    rebuilt pre-tokenizer is assigned back to ``backend_tokenizer``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # If the serialized pre-tokenizer disagrees on `add_prefix_space`,
        # rebuild it with the requested setting and install it back.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the BPE model files into `save_directory`; returns the file paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation"):
        """Flatten a Conversation into ids, eos-terminated per turn, keeping the tail."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            # Keep only the most recent tokens that fit the model.
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 363 | """simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    """Greedily concatenate consecutive (src, tgt) pairs while both sides
    stay within ``max_tokens`` tokenizer tokens.

    Returns ``(packed_src, packed_tgt)``. The three functions in this module
    were all garbled to the same name ``_A``; the names used at their call
    sites (``pack_examples``, ``pack_data_dir``, ``packer_cli``) are restored.
    """
    finished_src, finished_tgt = [], []

    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # One tokenizer pass; input_ids.shape[1] is the sequence length.
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup: flush the example under construction
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    """Pack the train split of ``data_dir`` and copy val/test unchanged into ``save_path``."""
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    """CLI entry point: parse args, load the tokenizer, pack the data dir."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)


if __name__ == "__main__":
    packer_cli()
| 363 | 1 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return ``price`` increased by ``tax_rate`` (e.g. 0.25 for a 25% tax).

    The garbled original declared two parameters with the same name (a
    SyntaxError) and was never bound to the ``price_plus_tax`` name the
    module guard calls; both are fixed here.
    """
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"""{price_plus_tax(1_00, 0.25) = }""")
    print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 501 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    """Reader that builds a `Dataset` (or streaming dataset) from JSON/JSON-Lines files.

    Restored from the garbled original: the base class is the imported
    ``AbstractDatasetReader``, the class no longer shares a name with the
    writer below, and ``field``/``builder`` are stored on ``self`` so
    ``read()`` can use them.
    """

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # `field` selects a top-level key inside each JSON file to read rows from.
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            field=field,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset: streaming if requested, else download/prepare then load."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
class JsonDatasetWriter:
    """Writer that serializes a `Dataset` to JSON/JSON-Lines, optionally in parallel.

    Restored from the garbled original: the internal methods are named
    ``_batch_json`` / ``_write`` (both are called by name in ``write``),
    constructor arguments are stored on ``self``, and the compression choice
    is actually forwarded to ``fsspec.open``.
    """

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = "utf-8"
        self.to_json_kwargs = to_json_kwargs

    def write(self) -> int:
        """Serialize the dataset; returns the number of bytes written."""
        # `path_or_buf` is handled by this class, never forwarded to pandas.
        _ = self.to_json_kwargs.pop("path_or_buf", None)
        orient = self.to_json_kwargs.pop("orient", "records")
        lines = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
        index = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
        compression = self.to_json_kwargs.pop("compression", None)

        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f"`datasets` currently does not support {compression} compression")

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, "wb", compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
                    " was passed. Please provide a local path instead."
                )
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs
            )
        return written

    def _batch_json(self, args) -> bytes:
        """Serialize one slice of rows to encoded JSON; `args` is a picklable tuple
        so this method can be used with multiprocessing.Pool.imap."""
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs
        )
        if not json_str.endswith("\n"):
            json_str += "\n"
        return json_str.encode(self.encoding)

    def _write(
        self,
        file_obj: BinaryIO,
        orient,
        lines,
        index,
        **to_json_kwargs,
    ) -> int:
        """Write batches to `file_obj`, sequentially or via a worker pool."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating json from Arrow format",
            ):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json,
                        [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating json from Arrow format",
                ):
                    written += file_obj.write(json_str)

        return written
| 501 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class __UpperCAmelCase(unittest.TestCase):
    """Smoke test for launching accelerate's test script on a TPU via xla_spawn.

    Fixes vs. the garbled original: the fixture is named ``setUp`` (so unittest
    runs it), the paths it computes are stored on ``self`` (the test reads
    ``self.test_dir`` / ``self.test_file_path``), the test method carries a
    ``test_`` prefix, and the assembled command is what gets executed.
    """

    def setUp(self):
        # Locate accelerate's bundled test script next to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        # Directory containing this test file (where xla_spawn.py is expected).
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""\n        {self.test_dir}/xla_spawn.py\n        --num_cores 8\n        {self.test_file_path}\n        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 606 |
'''simple docstring'''
import argparse
from collections import defaultdict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : str = F"{file}_{class_name}_{test_name}"
done_test[_id] += 1
with open(__magic_name__ , "r" ) as f:
UpperCAmelCase : Tuple = f.readlines()
UpperCAmelCase : Tuple = F"class {class_name}("
UpperCAmelCase : str = F"{4 * ' '}def {test_name}("
UpperCAmelCase : Dict = F"{8 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Tuple = F"{16 * ' '}{correct_line.split()[0]}"
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : List[str] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Tuple = 0
UpperCAmelCase : int = 0
UpperCAmelCase : Tuple = []
for line in lines:
if line.startswith(__magic_name__ ):
UpperCAmelCase : int = True
elif in_class and line.startswith(__magic_name__ ):
UpperCAmelCase : Dict = True
elif in_class and in_func and (line.startswith(__magic_name__ ) or line.startswith(__magic_name__ )):
UpperCAmelCase : List[str] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase : List[str] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"{spaces * ' '}{correct_line}" )
UpperCAmelCase : List[str] = False
else:
new_lines.append(__magic_name__ )
with open(__magic_name__ , "w" ) as f:
for line in new_lines:
f.write(__magic_name__ )
def main(correct_filename, fail_filename=None):
    """Apply every correction listed in `correct_filename` (semicolon-separated
    ``file;class;test;correct_line`` records), optionally restricted to the
    test ids listed in `fail_filename`.

    Restored names: the guard below calls ``main``, each record delegates to
    ``overwrite_file``, and the per-test counter is a ``defaultdict(int)``.
    """
    if fail_filename is not None:
        with open(fail_filename, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None

    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()

    main(args.correct_filename, args.fail_filename)
| 679 | 0 |
'''simple docstring'''
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K) — read as `T` inside builtin_voltage below


def builtin_voltage(
    donor_conc: float,  # donor concentration
    acceptor_conc: float,  # acceptor concentration
    intrinsic_conc: float,  # intrinsic concentration
) -> float:
    """Return the built-in voltage (in volts) of a p-n junction at T = 300 K.

    V_bi = (k_B * T / q) * ln(N_d * N_a / n_i^2).

    The garbled original bound the temperature constant to a throwaway name
    (the body read an undefined ``T``) and declared three identically-named
    parameters; both are fixed here.

    :raises ValueError: if any concentration is non-positive, or if either
        doping concentration does not exceed the intrinsic concentration.
    """
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError(
            "Donor concentration should be greater than intrinsic concentration"
        )
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError(
            "Acceptor concentration should be greater than intrinsic concentration"
        )
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 220 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    """A single skip-list node holding a key/value pair and its forward links."""

    def __init__(self, key: KT = "root", value: VT = None):
        self.key = key
        self.value = value
        # forward[i] is the next node at level i; its length is this node's height.
        self.forward: list = []

    def __repr__(self) -> str:
        return f"""Node({self.key}: {self.value})"""

    @property
    def level(self) -> int:
        """Height of this node (number of forward links)."""
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    """Probabilistic sorted mapping with expected O(log n) search/insert/delete.

    Restored from the garbled original: both classes here shared the name
    ``_a`` (the second shadowed the first while the code referenced ``Node``
    and ``SkipList``), and attributes/locals were bound to one recycled name.
    """

    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head = Node[KT, VT]()  # sentinel; holds no real key/value
        self.level = 0              # current height of the list
        self.p = p                  # promotion probability per level
        self.max_level = max_level

    def __str__(self) -> str:
        """ASCII rendering of the lanes, one row per node."""
        items = list(self)

        if len(items) == 0:
            return f"""SkipList(level={self.level})"""

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"""[{node.key}]""".ljust(label_size, '-') + '* ' * len(forwards))
        lines.append(' ' * label_size + '| ' * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"""[{node.key}]""".ljust(label_size, '-')
                + ' '.join(str(n.key) if n.key == node.key else '|' for n in forwards)
            )
            lines.append(' ' * label_size + '| ' * len(forwards))
            forwards = node.forward

        lines.append('None'.ljust(label_size) + '* ' * len(forwards))
        return f"""SkipList(level={self.level})\n""" + "\n".join(lines)

    def __iter__(self):
        """Yield keys in ascending order by walking level 0."""
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a geometric level in [1, max_level] with success probability p."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key):
        """Return (node_or_None, update_vector).

        `update_vector` holds, per level, the rightmost node whose key is
        smaller than `key` — the nodes whose links must change on
        insert/delete.
        """
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key):
        """Remove `key` (and its value) if present; no-op otherwise."""
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key, value):
        """Insert `key` -> `value`, overwriting the value if the key exists."""
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key):
        """Return the value stored under `key`, or None if absent."""
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value
        return None
def test_insert():
    """Inserted keys are all reachable via a level-0 walk with their values."""
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    """Re-inserting a key overwrites its value without duplicating the node."""
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    """find() returns current values and None for absent keys."""
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    """After delete, no lane still references the removed node."""
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    """Run the whole suite repeatedly; called by hand (names restored so the
    calls inside actually resolve — the garbled original bound every test to
    the same name)."""
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()
        test_searching_empty_list_returns_none()
        test_search()
        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()
        test_iter_always_yields_sorted_values()


def main():
    """Small demo: build a list, delete a key, print the lane diagram."""
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 220 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    """Build a FocalNetConfig matching the variant encoded in `model_name`.

    The function is named as at its call site (``get_focalnet_config``); its
    parameter is named ``model_name`` because the garbled original read that
    undefined name throughout, and the config kwargs are the real
    ``id2label``/``label2id`` names.
    """
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    """Map a timm-style FocalNet state-dict key to its HF Transformers name.

    The garbled original declared the parameter as ``__a`` while the body
    read the undefined ``name``; the parameter is restored, and the function
    no longer shares its name with the other helpers in this script.
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        # Everything except the classifier head lives under the `focalnet.` submodule.
        name = "focalnet." + name

    return name
def UpperCamelCase_ ( __a , __a , __a=False ) -> str:
    """Download an original FocalNet checkpoint, convert it to the Hugging Face
    format, verify the conversion, and optionally save / push the result.

    NOTE(review): this block is machine-mangled and not runnable as written —
    the three parameters all share the name ``__a`` (a SyntaxError), every
    local is assigned to ``a__`` while the body reads the intended names
    (``model_name``, ``model_name_to_url``, ``state_dict``, ``processor`` …),
    and each helper call receives the placeholder ``_UpperCamelCase``. From
    the argparse block below the intended parameters are
    ``(model_name, pytorch_dump_folder_path, push_to_hub)`` — restore the
    original local names before running. Code is kept byte-identical here;
    only documentation was added.
    """
    # Mapping from supported model name to the original checkpoint URL.
    a__ : Union[str, Any] = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on
    a__ : int = model_name_to_url[model_name]
    print("Checkpoint URL: " , _UpperCamelCase )
    # Download the original weights (I/O: network access).
    a__ : List[Any] = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="cpu" )["model"]
    # rename keys
    for key in state_dict.copy().keys():
        a__ : List[str] = state_dict.pop(_UpperCamelCase )
        a__ : int = val
    a__ : List[str] = get_focalnet_config(_UpperCamelCase )
    a__ : Optional[int] = FocalNetForImageClassification(_UpperCamelCase )
    model.eval()
    # load state dict
    model.load_state_dict(_UpperCamelCase )
    # verify conversion
    a__ : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
    a__ : Optional[Any] = BitImageProcessor(
        do_resize=_UpperCamelCase , size={"shortest_edge": 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCamelCase , crop_size=224 , do_normalize=_UpperCamelCase , image_mean=_UpperCamelCase , image_std=_UpperCamelCase , )
    a__ : List[str] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
    a__ : Optional[Any] = processor(images=_UpperCamelCase , return_tensors="pt" )
    # Reference torchvision pipeline used to cross-check the HF processor.
    a__ : Any = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    a__ : Dict = image_transforms(_UpperCamelCase ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , _UpperCamelCase , atol=1e-4 )
    a__ : List[Any] = model(**_UpperCamelCase )
    a__ : Optional[int] = outputs.logits.argmax(-1 ).item()
    print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
    print("First values of logits:" , outputs.logits[0, :3] )
    # Per-model expected logit slices used as a regression check below.
    if model_name == "focalnet-tiny":
        a__ : List[str] = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        a__ : Dict = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        a__ : Optional[Any] = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        a__ : Tuple = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        a__ : Dict = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        a__ : List[Any] = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1e-4 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(_UpperCamelCase )
        processor.save_pretrained(_UpperCamelCase )
    if push_to_hub:
        print(f'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(f'''{model_name}''' )
        processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
    # Command-line entry point for the FocalNet conversion script.
    # NOTE(review): names here are mangled — the parser is bound to
    # ``UpperCamelCase`` but used as ``parser``/``args``, and
    # ``convert_focalnet_checkpoint`` is not the name of the conversion
    # function defined above in this file. Restore the original names to run.
    UpperCamelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""focalnet-tiny""",
        type=str,
        help="""Name of the FocalNet model you\'d like to convert.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
        help="""Whether to push the model and processor to the hub.""",
    )
    UpperCamelCase : Union[str, Any] = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 37 |
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
a : List[Any] = random.Random()
def lowercase_ ( shape , scale=1.0 , rng=None , name=None ):
    """Create a nested list of random floats in ``[0, scale)``.

    Fixes the obfuscated signature in which all four parameters shared the
    name ``_UpperCamelCase`` — duplicate parameter names are a SyntaxError in
    Python — and the body consequently read unbound names.

    Args:
        shape: pair ``(batch, length)`` giving the dimensions of the output.
        scale: exclusive upper bound for the generated values.
        rng: optional ``random.Random``; falls back to the module-level RNG.
        name: unused; kept for signature compatibility with upstream helpers.

    Returns:
        A list of ``shape[0]`` lists, each containing ``shape[1]`` floats.
    """
    if rng is None:
        # Module-level default RNG (bound to the obfuscated global name ``a``
        # defined above in this file).
        rng = a
    values = []
    for _ in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
    """Test-configuration holder for the SpeechT5 feature-extractor tests.

    NOTE(review): this block is machine-mangled and not runnable as written —
    ``__init__`` and the helper methods repeat the parameter name
    ``snake_case_`` (duplicate parameter names are a SyntaxError), bodies read
    names (``parent``, ``batch_size`` …) the mangled signatures never bind,
    the three helper methods all collide on the name ``A`` (intended:
    ``prepare_feat_extract_dict``, ``prepare_inputs_for_common``,
    ``prepare_inputs_for_target``), and ``floats_list`` is not defined under
    that name in this file (likely the function mangled to ``lowercase_``).
    Code is kept byte-identical; only documentation was added.
    """
    def __init__( self , snake_case_ , snake_case_=7 , snake_case_=4_0_0 , snake_case_=2_0_0_0 , snake_case_=1 , snake_case_=0.0 , snake_case_=1_6_0_0_0 , snake_case_=True , snake_case_=8_0 , snake_case_=1_6 , snake_case_=6_4 , snake_case_="hann_window" , snake_case_=8_0 , snake_case_=7_6_0_0 , snake_case_=1e-1_0 , snake_case_=True , ) -> Tuple:
        """Store the hyper-parameters used to build feature-extractor configs."""
        __lowercase = parent
        __lowercase = batch_size
        __lowercase = min_seq_length
        __lowercase = max_seq_length
        # Step between the lengths of successive batch elements.
        __lowercase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __lowercase = feature_size
        __lowercase = padding_value
        __lowercase = sampling_rate
        __lowercase = do_normalize
        __lowercase = num_mel_bins
        __lowercase = hop_length
        __lowercase = win_length
        __lowercase = win_function
        __lowercase = fmin
        __lowercase = fmax
        __lowercase = mel_floor
        __lowercase = return_attention_mask
    def A ( self ) -> str:
        """Return the kwargs dict used to instantiate the feature extractor."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }
    def A ( self , snake_case_=False , snake_case_=False ) -> Tuple:
        """Build waveform-like inputs (optionally equal-length / numpy arrays)."""
        def _flatten(snake_case_ ):
            return list(itertools.chain(*snake_case_ ) )
        if equal_length:
            __lowercase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            __lowercase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            __lowercase = [np.asarray(snake_case_ ) for x in speech_inputs]
        return speech_inputs
    def A ( self , snake_case_=False , snake_case_=False ) -> Any:
        """Build mel-spectrogram-like target inputs (optionally equal-length / numpy)."""
        if equal_length:
            __lowercase = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            __lowercase = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            __lowercase = [np.asarray(snake_case_ ) for x in speech_inputs]
        return speech_inputs
@require_torch
class lowerCamelCase_ ( lowerCAmelCase__ , unittest.TestCase ):
    """Test suite for ``SpeechTaFeatureExtractor`` (waveform and spectrogram paths).

    NOTE(review): this block is machine-mangled and not runnable as written —
    every test method collides on the name ``A`` (only the last definition
    survives), the base class ``lowerCAmelCase__`` and the helper class
    ``SpeechTaFeatureExtractionTester`` are unbound under those names in this
    file, and most locals are read under names the mangled assignments to
    ``__lowercase`` never bind.  Code is kept byte-identical; only
    documentation was added.
    """
    __UpperCAmelCase = SpeechTaFeatureExtractor
    def A ( self ) -> Dict:
        """setUp: create the shared tester/configuration object."""
        __lowercase = SpeechTaFeatureExtractionTester(self )
    def A ( self , snake_case_ ) -> str:
        """Assert the array is zero-mean and unit-variance along axis 0."""
        self.assertTrue(np.all(np.mean(snake_case_ , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(snake_case_ , axis=0 ) - 1 ) < 1e-3 ) )
    def A ( self ) -> int:
        """Check that list and numpy inputs produce identical features."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        __lowercase = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
        # Test not batched input
        __lowercase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        __lowercase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
        # Test batched
        __lowercase = feat_extract(snake_case_ , return_tensors='''np''' ).input_values
        __lowercase = feat_extract(snake_case_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
            self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
    def A ( self ) -> Any:
        """Check normalization with each supported padding strategy."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        __lowercase = ['''longest''', '''max_length''', '''do_not_pad''']
        __lowercase = [None, 1_6_0_0, None]
        for max_length, padding in zip(snake_case_ , snake_case_ ):
            __lowercase = feat_extract(snake_case_ , padding=snake_case_ , max_length=snake_case_ , return_tensors='''np''' )
            __lowercase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self.assertTrue(input_values[0][8_0_0:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self.assertTrue(input_values[0][1_0_0_0:].sum() < 1e-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def A ( self ) -> List[str]:
        """Same padding-strategy check, driven by an explicit lengths range."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = range(8_0_0 , 1_4_0_0 , 2_0_0 )
        __lowercase = [floats_list((1, x) )[0] for x in lengths]
        __lowercase = ['''longest''', '''max_length''', '''do_not_pad''']
        __lowercase = [None, 1_6_0_0, None]
        for max_length, padding in zip(snake_case_ , snake_case_ ):
            __lowercase = feat_extract(snake_case_ , max_length=snake_case_ , padding=snake_case_ )
            __lowercase = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:8_0_0] )
            self._check_zero_mean_unit_variance(input_values[1][:1_0_0_0] )
            self._check_zero_mean_unit_variance(input_values[2][:1_2_0_0] )
    def A ( self ) -> List[Any]:
        """Check truncation to max_length with '''max_length''' padding."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        __lowercase = feat_extract(
            snake_case_ , truncation=snake_case_ , max_length=1_0_0_0 , padding='''max_length''' , return_tensors='''np''' )
        __lowercase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def A ( self ) -> List[str]:
        """Check '''longest''' padding both below and above the longest input."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        __lowercase = feat_extract(
            snake_case_ , truncation=snake_case_ , max_length=1_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        __lowercase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1_0_0_0) )
        __lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        __lowercase = feat_extract(
            snake_case_ , truncation=snake_case_ , max_length=2_0_0_0 , padding='''longest''' , return_tensors='''np''' )
        __lowercase = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :8_0_0] )
        self._check_zero_mean_unit_variance(input_values[1, :1_0_0_0] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1_2_0_0) )
    def A ( self ) -> str:
        """Check output dtype for numpy and torch return tensors."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        __lowercase = np.random.rand(1_0_0 ).astype(np.floataa )
        __lowercase = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            __lowercase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            __lowercase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
    def A ( self ) -> List[str]:
        """Check the spectrogram-target path (``audio_target=``)."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        __lowercase = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
        __lowercase = [np.asarray(snake_case_ ) for speech_input in speech_inputs]
        # Test feature size
        __lowercase = feature_extractor(audio_target=snake_case_ , padding=snake_case_ , return_tensors='''np''' ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        __lowercase = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_values
        __lowercase = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
        # Test batched
        __lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
        __lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
            self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        __lowercase = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
        __lowercase = np.asarray(snake_case_ )
        __lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
        __lowercase = feature_extractor(snake_case_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(snake_case_ , snake_case_ ):
            self.assertTrue(np.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
    def A ( self ) -> Tuple:
        """Check BatchFeature construction and numpy tensor conversion for targets."""
        __lowercase = self.feat_extract_tester.prepare_inputs_for_target()
        __lowercase = self.feature_extraction_class(**self.feat_extract_dict )
        __lowercase = feat_extract.model_input_names[0]
        __lowercase = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(snake_case_ ) == len(snake_case_ ) for x, y in zip(snake_case_ , processed_features[input_name] ) ) )
        __lowercase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=snake_case_ )
        __lowercase = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
        __lowercase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            __lowercase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def A ( self ) -> List[str]:
        """Same as above but with torch tensor conversion."""
        __lowercase = self.feat_extract_tester.prepare_inputs_for_target(equal_length=snake_case_ )
        __lowercase = self.feature_extraction_class(**self.feat_extract_dict )
        __lowercase = feat_extract.model_input_names[0]
        __lowercase = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
        __lowercase = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            __lowercase = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def A ( self ) -> Tuple:
        """Check numpy and torch padding produce numerically equal results."""
        __lowercase = self.feature_extraction_class(**self.feat_extract_dict )
        __lowercase = self.feat_extract_tester.prepare_inputs_for_target()
        __lowercase = feat_extract.model_input_names[0]
        __lowercase = BatchFeature({input_name: speech_inputs} )
        __lowercase = feat_extract.num_mel_bins # hack!
        __lowercase = feat_extract.pad(snake_case_ , padding='''longest''' , return_tensors='''np''' )[input_name]
        __lowercase = feat_extract.pad(snake_case_ , padding='''longest''' , return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
    def A ( self ) -> Dict:
        """Check the attention mask matches the unpadded input lengths."""
        __lowercase = self.feat_extract_dict
        __lowercase = True
        __lowercase = self.feature_extraction_class(**snake_case_ )
        __lowercase = self.feat_extract_tester.prepare_inputs_for_target()
        __lowercase = [len(snake_case_ ) for x in speech_inputs]
        __lowercase = feat_extract.model_input_names[0]
        __lowercase = BatchFeature({input_name: speech_inputs} )
        __lowercase = feat_extract.num_mel_bins # hack!
        __lowercase = feat_extract.pad(snake_case_ , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , snake_case_ )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , snake_case_ )
    def A ( self ) -> Optional[Any]:
        """Check the attention mask when truncating to the shortest input."""
        __lowercase = self.feat_extract_dict
        __lowercase = True
        __lowercase = self.feature_extraction_class(**snake_case_ )
        __lowercase = self.feat_extract_tester.prepare_inputs_for_target()
        __lowercase = [len(snake_case_ ) for x in speech_inputs]
        __lowercase = feat_extract.model_input_names[0]
        __lowercase = BatchFeature({input_name: speech_inputs} )
        __lowercase = min(snake_case_ )
        __lowercase = feat_extract.num_mel_bins # hack!
        __lowercase = feat_extract.pad(
            snake_case_ , padding='''max_length''' , max_length=snake_case_ , truncation=snake_case_ , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , snake_case_ )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def A ( self , snake_case_ ) -> str:
        """Load the first ``num_samples`` audio arrays of the dummy LibriSpeech set (I/O)."""
        from datasets import load_dataset
        __lowercase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        __lowercase = ds.sort('''id''' ).select(range(snake_case_ ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def A ( self ) -> Optional[int]:
        """Integration check: waveform path against hard-coded expected values."""
        __lowercase = torch.tensor(
            [2.3_8_0_4e-0_3, 2.0_7_5_2e-0_3, 1.9_8_3_6e-0_3, 2.1_0_5_7e-0_3, 1.6_1_7_4e-0_3,
             3.0_5_1_8e-0_4, 9.1_5_5_3e-0_5, 3.3_5_6_9e-0_4, 9.7_6_5_6e-0_4, 1.8_3_1_1e-0_3,
             2.0_1_4_2e-0_3, 2.1_0_5_7e-0_3, 1.7_3_9_5e-0_3, 4.5_7_7_6e-0_4, -3.9_6_7_3e-0_4,
             4.5_7_7_6e-0_4, 1.0_0_7_1e-0_3, 9.1_5_5_3e-0_5, 4.8_8_2_8e-0_4, 1.1_5_9_7e-0_3,
             7.3_2_4_2e-0_4, 9.4_6_0_4e-0_4, 1.8_0_0_5e-0_3, 1.8_3_1_1e-0_3, 8.8_5_0_1e-0_4,
             4.2_7_2_5e-0_4, 4.8_8_2_8e-0_4, 7.3_2_4_2e-0_4, 1.0_9_8_6e-0_3, 2.1_0_5_7e-0_3] )
        # fmt: on
        __lowercase = self._load_datasamples(1 )
        __lowercase = SpeechTaFeatureExtractor()
        __lowercase = feature_extractor(snake_case_ , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 9_3_6_8_0) )
        self.assertTrue(torch.allclose(input_values[0, :3_0] , snake_case_ , atol=1e-6 ) )
    def A ( self ) -> str:
        """Integration check: spectrogram-target path against expected values."""
        __lowercase = torch.tensor(
            [-2.6_8_7_0, -3.0_1_0_4, -3.1_3_5_6, -3.5_3_5_2, -3.0_0_4_4, -3.0_3_5_3, -3.4_7_1_9, -3.6_7_7_7,
             -3.1_5_2_0, -2.9_4_3_5, -2.6_5_5_3, -2.8_7_9_5, -2.9_9_4_4, -2.5_9_2_1, -3.0_2_7_9, -3.0_3_8_6,
             -3.0_8_6_4, -3.1_2_9_1, -3.2_3_5_3, -2.7_4_4_4, -2.6_8_3_1, -2.7_2_8_7, -3.1_7_6_1, -3.1_5_7_1,
             -3.2_7_2_6, -3.0_5_8_2, -3.1_0_0_7, -3.4_5_3_3, -3.4_6_9_5, -3.0_9_9_8] )
        # fmt: on
        __lowercase = self._load_datasamples(1 )
        __lowercase = SpeechTaFeatureExtractor()
        __lowercase = feature_extractor(audio_target=snake_case_ , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 3_6_6, 8_0) )
        self.assertTrue(torch.allclose(input_values[0, 0, :3_0] , snake_case_ , atol=1e-4 ) )
| 639 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class __a :
    """A vector in R^n backed by a list of components.

    NOTE(review): this block is machine-mangled and not usable as written —
    every assignment target was rewritten to ``SCREAMING_SNAKE_CASE__`` (so
    ``__init__`` never sets ``self.__components`` and crashes with
    ``list(None)`` when no components are given), the angle method repeats a
    parameter name (a SyntaxError), and five methods collide on the name
    ``__A`` (intended: ``copy``, ``component``, ``change_component``,
    ``euclidean_length``, ``angle``) so only the last definition would
    survive.  Code is kept byte-identical; only documentation was added.
    """
    def __init__( self : str ,_UpperCamelCase : Collection[float] | None = None ) -> None:
        """Initialise from an iterable of components (empty when ``None``)."""
        if components is None:
            SCREAMING_SNAKE_CASE__ =[]
        SCREAMING_SNAKE_CASE__ =list(_UpperCamelCase )
    def __len__( self : List[str] ) -> int:
        """Return the dimension of the vector."""
        return len(self.__components )
    def __str__( self : Dict ) -> str:
        """Render as ``(c1,c2,...)``."""
        return "(" + ",".join(map(_UpperCamelCase ,self.__components ) ) + ")"
    def __add__( self : Optional[Any] ,_UpperCamelCase : Vector ) -> Vector:
        """Component-wise addition; both vectors must have the same size."""
        SCREAMING_SNAKE_CASE__ =len(self )
        if size == len(_UpperCamelCase ):
            SCREAMING_SNAKE_CASE__ =[self.__components[i] + other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )]
            return Vector(_UpperCamelCase )
        else:
            raise Exception("""must have the same size""" )
    def __sub__( self : Optional[Any] ,_UpperCamelCase : Vector ) -> Vector:
        """Component-wise subtraction; both vectors must have the same size."""
        SCREAMING_SNAKE_CASE__ =len(self )
        if size == len(_UpperCamelCase ):
            SCREAMING_SNAKE_CASE__ =[self.__components[i] - other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )]
            return Vector(_UpperCamelCase )
        else: # error case
            raise Exception("""must have the same size""" )
    @overload
    def __mul__( self : str ,_UpperCamelCase : float ) -> Vector:
        """Scalar multiplication overload."""
        ...
    @overload
    def __mul__( self : int ,_UpperCamelCase : Vector ) -> float:
        """Dot-product overload."""
        ...
    def __mul__( self : Union[str, Any] ,_UpperCamelCase : float | Vector ) -> float | Vector:
        """Scalar multiplication for numbers, dot product for equal-size vectors."""
        if isinstance(_UpperCamelCase ,(float, int) ):
            SCREAMING_SNAKE_CASE__ =[c * other for c in self.__components]
            return Vector(_UpperCamelCase )
        elif isinstance(_UpperCamelCase ,_UpperCamelCase ) and len(self ) == len(_UpperCamelCase ):
            SCREAMING_SNAKE_CASE__ =len(self )
            SCREAMING_SNAKE_CASE__ =[self.__components[i] * other.component(_UpperCamelCase ) for i in range(_UpperCamelCase )]
            return sum(_UpperCamelCase )
        else: # error case
            raise Exception("""invalid operand!""" )
    def __A ( self : int ) -> Vector:
        """Return a copy of this vector (intended name: ``copy``)."""
        return Vector(self.__components )
    def __A ( self : Optional[int] ,_UpperCamelCase : int ) -> float:
        """Return component ``i`` with bounds checking (intended name: ``component``)."""
        if isinstance(_UpperCamelCase ,_UpperCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("""index out of range""" )
    def __A ( self : int ,_UpperCamelCase : int ,_UpperCamelCase : float ) -> None:
        """Set component ``pos`` (intended name: ``change_component``)."""
        assert -len(self.__components ) <= pos < len(self.__components )
        SCREAMING_SNAKE_CASE__ =value
    def __A ( self : str ) -> float:
        """Return the Euclidean norm (intended name: ``euclidean_length``)."""
        if len(self.__components ) == 0:
            raise Exception("""Vector is empty""" )
        SCREAMING_SNAKE_CASE__ =[c**2 for c in self.__components]
        return math.sqrt(sum(_UpperCamelCase ) )
    def __A ( self : Union[str, Any] ,_UpperCamelCase : Vector ,_UpperCamelCase : bool = False ) -> float:
        """Return the angle to ``other``, in degrees when ``deg`` (intended name: ``angle``)."""
        SCREAMING_SNAKE_CASE__ =self * other
        SCREAMING_SNAKE_CASE__ =self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def UpperCAmelCase_ ( dimension ):
    """Return the zero vector of the given dimension.

    Fixes the obfuscated original, which asserted ``isinstance(x, x)`` and
    referenced the unbound name ``dimension``.

    Args:
        dimension: number of components; must be an ``int``.

    Returns:
        A ``Vector`` with ``dimension`` zero components.
    """
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def UpperCAmelCase_ ( dimension, pos ):
    """Return the unit basis vector e_pos in R^dimension.

    Fixes the obfuscated original, whose two parameters shared one name (a
    SyntaxError) and whose second assignment overwrote the component list
    instead of setting index ``pos``.

    Args:
        dimension: number of components; must be an ``int``.
        pos: index that receives the value 1; must be an ``int``.

    Returns:
        A ``Vector`` that is 1 at ``pos`` and 0 elsewhere.
    """
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def UpperCAmelCase_ ( scalar, x, y ):
    """Return ``x * scalar + y`` (the BLAS axpy operation).

    Fixes the obfuscated original, whose three parameters shared one name (a
    SyntaxError) and whose body read the unbound names ``x``/``y``/``scalar``.

    Args:
        scalar: numeric multiplier for ``x``.
        x: a ``Vector``.
        y: a ``Vector`` of the same size as ``x``.

    Returns:
        The ``Vector`` ``x * scalar + y``.
    """
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def UpperCAmelCase_ ( n, a, b ):
    """Return a random vector of size ``n`` with integer components in [a, b].

    Fixes the obfuscated original, whose three parameters shared one name (a
    SyntaxError).

    NOTE(review): the original seeded ``random`` with one of its identically
    named parameters; seeding with ``n`` (the first argument) is assumed here
    — confirm against the pre-obfuscation source.

    Args:
        n: number of components.
        a: inclusive lower bound for each component.
        b: inclusive upper bound for each component.

    Returns:
        A ``Vector`` with ``n`` random integer components.
    """
    random.seed(n)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class __a :
    """A dense ``height`` x ``width`` matrix backed by a list of row lists.

    NOTE(review): this block is machine-mangled and not usable as written —
    ``__init__`` repeats a parameter name (a SyntaxError) and assigns mangled
    locals instead of ``self.__matrix``/``self.__width``/``self.__height``,
    assignment targets throughout were rewritten to
    ``SCREAMING_SNAKE_CASE__``, and seven methods collide on the name ``__A``
    (intended: ``height``, ``width``, ``component``, ``change_component``,
    ``minor``, ``cofactor``, ``determinant``) so only the last definition
    would survive.  Code is kept byte-identical; only documentation was added.
    """
    def __init__( self : int ,_UpperCamelCase : list[list[float]] ,_UpperCamelCase : int ,_UpperCamelCase : int ) -> None:
        """Initialise from a row-major list of lists plus width and height."""
        SCREAMING_SNAKE_CASE__ =matrix
        SCREAMING_SNAKE_CASE__ =w
        SCREAMING_SNAKE_CASE__ =h
    def __str__( self : Optional[Any] ) -> str:
        """Render rows as ``|a,b,...|`` lines."""
        SCREAMING_SNAKE_CASE__ =""""""
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans
    def __add__( self : Tuple ,_UpperCamelCase : Matrix ) -> Matrix:
        """Component-wise addition; matrices must share dimensions."""
        if self.__width == other.width() and self.__height == other.height():
            SCREAMING_SNAKE_CASE__ =[]
            for i in range(self.__height ):
                SCREAMING_SNAKE_CASE__ =[
                    self.__matrix[i][j] + other.component(_UpperCamelCase ,_UpperCamelCase )
                    for j in range(self.__width )
                ]
                matrix.append(_UpperCamelCase )
            return Matrix(_UpperCamelCase ,self.__width ,self.__height )
        else:
            raise Exception("""matrix must have the same dimension!""" )
    def __sub__( self : Tuple ,_UpperCamelCase : Matrix ) -> Matrix:
        """Component-wise subtraction; matrices must share dimensions."""
        if self.__width == other.width() and self.__height == other.height():
            SCREAMING_SNAKE_CASE__ =[]
            for i in range(self.__height ):
                SCREAMING_SNAKE_CASE__ =[
                    self.__matrix[i][j] - other.component(_UpperCamelCase ,_UpperCamelCase )
                    for j in range(self.__width )
                ]
                matrix.append(_UpperCamelCase )
            return Matrix(_UpperCamelCase ,self.__width ,self.__height )
        else:
            raise Exception("""matrices must have the same dimension!""" )
    @overload
    def __mul__( self : int ,_UpperCamelCase : float ) -> Matrix:
        """Scalar multiplication overload."""
        ...
    @overload
    def __mul__( self : List[str] ,_UpperCamelCase : Vector ) -> Vector:
        """Matrix-vector multiplication overload."""
        ...
    def __mul__( self : Tuple ,_UpperCamelCase : float | Vector ) -> Vector | Matrix:
        """Matrix-vector product for vectors, scalar product for numbers."""
        if isinstance(_UpperCamelCase ,_UpperCamelCase ): # matrix-vector
            if len(_UpperCamelCase ) == self.__width:
                SCREAMING_SNAKE_CASE__ =zero_vector(self.__height )
                for i in range(self.__height ):
                    SCREAMING_SNAKE_CASE__ =[
                        self.__matrix[i][j] * other.component(_UpperCamelCase )
                        for j in range(self.__width )
                    ]
                    ans.change_component(_UpperCamelCase ,sum(_UpperCamelCase ) )
                return ans
            else:
                raise Exception(
                    """vector must have the same size as the """
                    """number of columns of the matrix!""" )
        elif isinstance(_UpperCamelCase ,(int, float) ): # matrix-scalar
            SCREAMING_SNAKE_CASE__ =[
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(_UpperCamelCase ,self.__width ,self.__height )
        return None
    def __A ( self : Optional[Any] ) -> int:
        """Return the number of rows (intended name: ``height``)."""
        return self.__height
    def __A ( self : Any ) -> int:
        """Return the number of columns (intended name: ``width``)."""
        return self.__width
    def __A ( self : Any ,_UpperCamelCase : int ,_UpperCamelCase : int ) -> float:
        """Return entry (x, y) with bounds checking (intended name: ``component``)."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def __A ( self : Optional[Any] ,_UpperCamelCase : int ,_UpperCamelCase : int ,_UpperCamelCase : float ) -> None:
        """Set entry (x, y) (intended name: ``change_component``)."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            SCREAMING_SNAKE_CASE__ =value
        else:
            raise Exception("""change_component: indices out of bounds""" )
    def __A ( self : List[Any] ,_UpperCamelCase : int ,_UpperCamelCase : int ) -> float:
        """Return the (x, y) minor of a square matrix (intended name: ``minor``)."""
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        SCREAMING_SNAKE_CASE__ =self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(_UpperCamelCase ) ):
            SCREAMING_SNAKE_CASE__ =minor[i][:y] + minor[i][y + 1 :]
        return Matrix(_UpperCamelCase ,self.__width - 1 ,self.__height - 1 ).determinant()
    def __A ( self : Optional[int] ,_UpperCamelCase : int ,_UpperCamelCase : int ) -> float:
        """Return the signed (x, y) cofactor (intended name: ``cofactor``)."""
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(_UpperCamelCase ,_UpperCamelCase )
        else:
            raise Exception("""Indices out of bounds""" )
    def __A ( self : List[str] ) -> float:
        """Return the determinant by Laplace expansion (intended name: ``determinant``)."""
        if self.__height != self.__width:
            raise Exception("""Matrix is not square""" )
        if self.__height < 1:
            raise Exception("""Matrix has no element""" )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            SCREAMING_SNAKE_CASE__ =[
                self.__matrix[0][y] * self.cofactor(0 ,_UpperCamelCase ) for y in range(self.__width )
            ]
            return sum(_UpperCamelCase )
def UpperCAmelCase_ ( n ):
    """Return an ``n`` x ``n`` matrix of zeros.

    Fixes the obfuscated original, which built the row list into a mangled
    local but passed an unbound name to ``Matrix``.

    Args:
        n: number of rows and columns.

    Returns:
        A ``Matrix`` of shape ``n`` x ``n`` filled with zeros.
    """
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def UpperCAmelCase_ ( width, height, a, b ):
    """Return a ``height`` x ``width`` matrix with random integer entries in [a, b].

    Fixes the obfuscated original, whose four parameters shared one name (a
    SyntaxError).

    NOTE(review): the original seeded ``random`` with one of its identically
    named parameters; seeding with ``width`` (the first argument) is assumed
    here — confirm against the pre-obfuscation source.

    Args:
        width: number of columns.
        height: number of rows.
        a: inclusive lower bound for each entry.
        b: inclusive upper bound for each entry.

    Returns:
        A ``Matrix`` of shape ``height`` x ``width`` with random entries.
    """
    random.seed(width)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 588 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __a ( __lowerCamelCase , unittest.TestCase ):
    '''Fast CPU-only checks for ``KandinskyInpaintPipeline`` built from tiny,
    randomly initialised sub-models so the whole pipeline runs in seconds.

    NOTE(review): this block appears machine-renamed — every class attribute is
    bound to `_A` (so each binding shadows the previous one), every
    property/method is named `__A` even though call sites below use the original
    names (`self.time_input_dim`, `self.dummy_movq_kwargs`,
    `self.get_dummy_components`, ...), and the dummy-input builder declares the
    parameter `_UpperCamelCase` twice, which is a SyntaxError. The original
    identifiers need to be restored before this file can run; only comments are
    added here.
    '''
    # Pipeline class under test; the following `_A` bindings were presumably
    # `params`, `batch_params`, `required_optional_params`, and
    # `test_xformers_attention` before renaming — confirm against the mixin.
    _A : Union[str, Any] = KandinskyInpaintPipeline
    _A : Any = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    _A : Any = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    _A : Optional[int] = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    _A : int = False
    @property
    def __A ( self : Any ) -> Dict:
        '''Hidden size of the dummy text encoder.'''
        return 3_2
    @property
    def __A ( self : Optional[int] ) -> int:
        '''Time-embedding input dimension of the dummy UNet.'''
        return 3_2
    @property
    def __A ( self : List[Any] ) -> str:
        '''Width of the first UNet block (mirrors the time input dim).'''
        return self.time_input_dim
    @property
    def __A ( self : Optional[int] ) -> str:
        '''Time-embedding projection dimension (4x the time input dim).'''
        return self.time_input_dim * 4
    @property
    def __A ( self : List[Any] ) -> Optional[Any]:
        '''Cross-attention dimension shared by text encoder and UNet.'''
        return 1_0_0
    @property
    def __A ( self : str ) -> Any:
        '''Tiny XLM-Roberta tokenizer matching the dummy MCLIP text encoder.'''
        SCREAMING_SNAKE_CASE__ =XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer
    @property
    def __A ( self : List[Any] ) -> Union[str, Any]:
        '''Deterministically seeded tiny MultilingualCLIP text encoder (eval mode).'''
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ =MCLIPConfig(
            numDims=self.cross_attention_dim ,transformerDimensions=self.text_embedder_hidden_size ,hidden_size=self.text_embedder_hidden_size ,intermediate_size=3_7 ,num_attention_heads=4 ,num_hidden_layers=5 ,vocab_size=1_0_0_5 ,)
        SCREAMING_SNAKE_CASE__ =MultilingualCLIP(_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ =text_encoder.eval()
        return text_encoder
    @property
    def __A ( self : Optional[Any] ) -> Optional[int]:
        '''Deterministically seeded tiny conditional UNet for inpainting (9 in-channels:
        4 latents + 4 masked-image latents + 1 mask).'''
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ ={
            """in_channels""": 9,
            # Out channels is double in channels because predicts mean and variance
            """out_channels""": 8,
            """addition_embed_type""": """text_image""",
            """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
            """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
            """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
            """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
            """layers_per_block""": 1,
            """encoder_hid_dim""": self.text_embedder_hidden_size,
            """encoder_hid_dim_type""": """text_image_proj""",
            """cross_attention_dim""": self.cross_attention_dim,
            """attention_head_dim""": 4,
            """resnet_time_scale_shift""": """scale_shift""",
            """class_embed_type""": None,
        }
        SCREAMING_SNAKE_CASE__ =UNetaDConditionModel(**_UpperCamelCase )
        return model
    @property
    def __A ( self : str ) -> Tuple:
        '''Constructor kwargs for the tiny VQ decoder model.'''
        return {
            "block_out_channels": [3_2, 6_4],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def __A ( self : Dict ) -> Union[str, Any]:
        '''Deterministically seeded tiny VQModel built from the kwargs above.'''
        torch.manual_seed(0 )
        SCREAMING_SNAKE_CASE__ =VQModel(**self.dummy_movq_kwargs )
        return model
    def __A ( self : int ) -> str:
        '''Assemble the full component dict expected by KandinskyInpaintPipeline.'''
        SCREAMING_SNAKE_CASE__ =self.dummy_text_encoder
        SCREAMING_SNAKE_CASE__ =self.dummy_tokenizer
        SCREAMING_SNAKE_CASE__ =self.dummy_unet
        SCREAMING_SNAKE_CASE__ =self.dummy_movq
        SCREAMING_SNAKE_CASE__ =DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule="""linear""" ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=_UpperCamelCase ,set_alpha_to_one=_UpperCamelCase ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_UpperCamelCase ,)
        SCREAMING_SNAKE_CASE__ ={
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    # NOTE(review): duplicate parameter name `_UpperCamelCase` below is a
    # SyntaxError — this was presumably `get_dummy_inputs(self, device, seed=0)`.
    def __A ( self : Union[str, Any] ,_UpperCamelCase : Optional[int] ,_UpperCamelCase : Any=0 ) -> Dict:
        '''Build seeded dummy call kwargs: prompt, 64x64 init image, mask, embeds, generator.'''
        # NOTE(review): `np.uinta` / `np.floataa` look like mangled
        # `np.uint8` / `np.float32` — these attributes do not exist in numpy.
        SCREAMING_SNAKE_CASE__ =floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ =floats_tensor((1, self.cross_attention_dim) ,rng=random.Random(seed + 1 ) ).to(_UpperCamelCase )
        # create init_image
        SCREAMING_SNAKE_CASE__ =floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(_UpperCamelCase ) ).to(_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ =image.cpu().permute(0 ,2 ,3 ,1 )[0]
        SCREAMING_SNAKE_CASE__ =Image.fromarray(np.uinta(_UpperCamelCase ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
        # create mask
        SCREAMING_SNAKE_CASE__ =np.ones((6_4, 6_4) ,dtype=np.floataa )
        SCREAMING_SNAKE_CASE__ =0
        # MPS does not support device-bound generators; fall back to a CPU seed.
        if str(_UpperCamelCase ).startswith("""mps""" ):
            SCREAMING_SNAKE_CASE__ =torch.manual_seed(_UpperCamelCase )
        else:
            SCREAMING_SNAKE_CASE__ =torch.Generator(device=_UpperCamelCase ).manual_seed(_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ ={
            """prompt""": """horse""",
            """image""": init_image,
            """mask_image""": mask,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 6_4,
            """width""": 6_4,
            """num_inference_steps""": 2,
            """guidance_scale""": 4.0,
            """output_type""": """np""",
        }
        return inputs
    def __A ( self : Tuple ) -> Dict:
        '''Run the pipeline on CPU and compare an output slice to frozen reference values,
        both via the output object and the tuple return path.'''
        SCREAMING_SNAKE_CASE__ ="""cpu"""
        SCREAMING_SNAKE_CASE__ =self.get_dummy_components()
        SCREAMING_SNAKE_CASE__ =self.pipeline_class(**_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ =pipe.to(_UpperCamelCase )
        pipe.set_progress_bar_config(disable=_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ =pipe(**self.get_dummy_inputs(_UpperCamelCase ) )
        SCREAMING_SNAKE_CASE__ =output.images
        SCREAMING_SNAKE_CASE__ =pipe(
            **self.get_dummy_inputs(_UpperCamelCase ) ,return_dict=_UpperCamelCase ,)[0]
        SCREAMING_SNAKE_CASE__ =image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE__ =image_from_tuple[0, -3:, -3:, -1]
        print(f"""image.shape {image.shape}""" )
        assert image.shape == (1, 6_4, 6_4, 3)
        SCREAMING_SNAKE_CASE__ =np.array(
            [0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
    def __A ( self : Dict ) -> int:
        '''Loosen the mixin's batched-vs-single tolerance for this pipeline.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __a ( unittest.TestCase ):
    '''GPU integration test: run the real Kandinsky prior + inpaint checkpoints
    end-to-end and compare against a stored fp16 reference image.

    NOTE(review): both methods are named `__A` (the second shadows the first),
    so unittest cannot discover either — they were presumably `tearDown` and
    `test_kandinsky_inpaint` before machine renaming. `torch.floataa` also looks
    like a mangled `torch.float16`. Only comments are added here.
    '''
    def __A ( self : List[str] ) -> Optional[int]:
        '''Free Python and CUDA memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def __A ( self : List[Any] ) -> Optional[Any]:
        '''Inpaint a hat onto a cat photo (768x768, masked top rows) and check the
        mean pixel difference against the reference numpy image.'''
        SCREAMING_SNAKE_CASE__ =load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy""" )
        SCREAMING_SNAKE_CASE__ =load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        SCREAMING_SNAKE_CASE__ =np.ones((7_6_8, 7_6_8) ,dtype=np.floataa )
        SCREAMING_SNAKE_CASE__ =0
        SCREAMING_SNAKE_CASE__ ="""a hat"""
        SCREAMING_SNAKE_CASE__ =KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" ,torch_dtype=torch.floataa )
        pipe_prior.to(_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ =KandinskyInpaintPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-inpaint""" ,torch_dtype=torch.floataa )
        SCREAMING_SNAKE_CASE__ =pipeline.to(_UpperCamelCase )
        pipeline.set_progress_bar_config(disable=_UpperCamelCase )
        SCREAMING_SNAKE_CASE__ =torch.Generator(device="""cpu""" ).manual_seed(0 )
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ =pipe_prior(
            _UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
        SCREAMING_SNAKE_CASE__ =pipeline(
            _UpperCamelCase ,image=_UpperCamelCase ,mask_image=_UpperCamelCase ,image_embeds=_UpperCamelCase ,negative_image_embeds=_UpperCamelCase ,generator=_UpperCamelCase ,num_inference_steps=1_0_0 ,height=7_6_8 ,width=7_6_8 ,output_type="""np""" ,)
        SCREAMING_SNAKE_CASE__ =output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(_UpperCamelCase ,_UpperCamelCase )
| 588 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level constants. The tokenizer below references these by name
# (`VOCAB_FILES_NAMES` in save_vocabulary, `logger` for warnings), so the
# previous pattern of binding all four values to the single name `lowercase__`
# (each assignment discarding the last) left them undefined at use sites.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
    },
}

# Maximum input sizes (in tokens) for the published checkpoints.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'openbmb/cpm-ant-10b': 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered ``token -> index`` dictionary.

    Args:
        vocab_file: Path to a UTF-8 text file with one token per line; the
            0-based line number becomes the token's id.

    Returns:
        ``collections.OrderedDict`` mapping each token to its index, in file
        order.
    """
    # The tokenizer's __init__ calls this as ``load_vocab(...)``, so the
    # function must carry that name.
    vocab = collections.OrderedDict()
    with open(vocab_file, '''r''', encoding='''utf-8''') as reader:
        lines = reader.readlines()
    for index, token in enumerate(lines):
        # Strip only the trailing newline; other whitespace is part of the token.
        vocab[token.rstrip('''\n''')] = index
    return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match-first wordpiece tokenizer used by the CPM-Ant
    tokenizer (instantiated below as ``WordpieceTokenizer(vocab=..., unk_token=...)``).

    The previous definition declared the same parameter name for every
    ``__init__`` argument (a SyntaxError) and inherited from an undefined base.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        # vocab: mapping from piece string to id; only membership is used here.
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped straight to a single unk token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split ``token`` into the longest vocabulary pieces, left to right.

        Characters at which no vocabulary piece matches are emitted as the
        unknown token, one per character.
        """
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            # Find the longest substring starting at `start` that is in the vocab.
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No piece matched: emit unk for this single character and advance.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """CPM-Ant tokenizer: jieba pre-segmentation followed by greedy wordpiece.

    The previous definition had machine-mangled identifiers: every ``__init__``
    parameter shared one name (a SyntaxError), methods were all named
    ``lowerCAmelCase``, and the base class was an undefined name — while the
    body already referenced the real names (``load_vocab``,
    ``WordpieceTokenizer``, ``logger``, ``VOCAB_FILES_NAMES``). This restores
    the pinned identifiers.

    Args:
        vocab_file: Path to the vocabulary file (one token per line).
        bod_token / eod_token: Begin/end-of-document tokens.
        bos_token / eos_token / pad_token / unk_token: Standard special tokens.
        line_token / space_token: Tokens standing in for "\\n" and " ".
        padding_side: Side on which to pad; CPM-Ant pads on the left.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        # jieba is an optional dependency needed for Chinese word segmentation.
        requires_backends(self, ['jieba'])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Internally, plain " " and "\n" stand in for the special space/line tokens.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        # Keep the vocabulary sorted by id so iteration order matches indices.
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then wordpiece-tokenize each segment."""
        output_tokens = []
        for segment in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(segment))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Drop negative ids and padding/BOS/EOS before decoding."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        """Return True if ``token`` is in the vocabulary."""
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Convert a token to its id, falling back to the unk id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to its token, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocabulary to disk, restoring the special space/line tokens."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Undo the " "/"\n" aliasing applied in __init__ before serialising.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, '''w''', encoding='''utf-8''') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ''' Please check that the vocabulary is not corrupted!'''
                    )
                    index = token_index
                writer.write(token + '''\n''')
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """Prefix each sequence with the BOS id (CPM-Ant format)."""
        if token_ids_a_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a_a

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False):
        """Mask marking the BOS positions added by ``build_inputs_with_special_tokens``."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a))
        return [1] + ([0] * len(token_ids_a))
| 521 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
# NOTE(review): both module constants below are bound to the same name
# `__snake_case`, so the second assignment (the checkpoint archive map)
# immediately discards the logger created on the first line. These were
# presumably `logger` and `LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP` before
# machine renaming — confirm and restore distinct names.
__snake_case = logging.get_logger(__name__)

# Map from published Longformer checkpoints to their hosted config files.
__snake_case = {
    """allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
    """allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
    """allenai/longformer-large-4096-finetuned-triviaqa""": (
        """https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
    ),
    """allenai/longformer-base-4096-extra.pos.embd.only""": (
        """https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
    ),
    """allenai/longformer-large-4096-extra.pos.embd.only""": (
        """https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
    ),
}
class LongformerConfig(PretrainedConfig):
    """Configuration for a Longformer model.

    The previous definition declared one shared name for every ``__init__``
    parameter (a SyntaxError), inherited from an undefined base, and collided
    with the ONNX config class name below; the body's attribute assignments pin
    the real parameter names restored here.

    Args:
        attention_window: Size (or per-layer list of sizes) of the local
            attention window.
        onnx_export: Set to True by the ONNX exporter to switch to
            export-friendly code paths.
        (Remaining arguments are the standard BERT-style hyper-parameters.)
    """

    model_type = '''longformer'''

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ) -> None:
        # pad_token_id is consumed by the base class; the rest are stored here.
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    """ONNX export configuration for Longformer.

    The previous definition reused the config class's name, declared duplicate
    ``__init__`` parameter names (a SyntaxError), and had lost the assignment
    targets (``config.onnx_export``, ``outputs["pooler_output"]``, the
    global-attention mask writes), which are restored here from the visible
    right-hand sides.
    """

    def __init__(self, config, task: str = "default", patching_specs=None) -> None:
        super().__init__(config, task, patching_specs)
        # Switch the model to ONNX-export-friendly code paths.
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes for the exported inputs (Longformer adds a global mask)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            # The default task also exports the pooled output.
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        """What absolute tolerance the exported model matches the original within."""
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # Longformer needs opset >= 14 (tril/triu support).
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self, preprocessor, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework=None
    ) -> Mapping[str, Any]:
        """Extend the base dummy inputs with a global attention mask."""
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        global_attention_mask = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        global_attention_mask[:, ::2] = 1
        inputs["global_attention_mask"] = global_attention_mask
        return inputs
| 178 | 0 |
"""simple docstring"""
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    '''split_dict''', [
        SplitDict(),
        SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1_337, num_examples=42, dataset_name='''my_dataset''')}),
        SplitDict({'''train''': SplitInfo(name='''train''', num_bytes=1_337, num_examples=42)}),
        SplitDict({'''train''': SplitInfo()}),
    ],
)
def test_split_dict_to_yaml_list(split_dict: SplitDict) -> None:
    """Round-trip a SplitDict through its YAML-list representation.

    Fixes two defects in the previous version: the length assertion compared a
    value with itself (always true — it must compare the YAML list against the
    dict), and the loop's attribute-assignment targets had been lost, leaving
    dead assignments. The function is also renamed with a ``test_`` prefix so
    pytest collects it (it previously shared the name ``lowercase`` with the
    test below, which shadowed it).
    """
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    # The second case previously passed an undefined name; the intended case is
    # an explicit ``dataset_name=None``.
    '''split_info''', [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name='''my_dataset''')]
)
def test_split_dict_asdict_has_dataset_name(split_info) -> None:
    """``asdict`` must keep the (deprecated) ``dataset_name`` field of each split.

    Renamed with a ``test_`` prefix so pytest collects it.
    """
    split_dict_asdict = asdict(SplitDict({'''train''': split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 229 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
# Smallest candidate value tried when searching for a primitive root / private
# key below. NOTE(review): the constant is never referenced by name in the
# visible code (the functions hard-code 3); presumably it was
# `min_primitive_root` before renaming — confirm and wire it back in.
SCREAMING_SNAKE_CASE : Any = 3
def primitive_root(p_val: int) -> int:
    """Return a random primitive-root candidate modulo the prime ``p_val``.

    Candidates g with g^2 = 1 (mod p) are rejected; Fermat's little theorem
    makes the second check (g^p mod p) reject g = 1 style degenerate cases.
    Renamed from ``lowercase``: ``generate_key`` below calls it as
    ``primitive_root``, and all four functions in this script previously shared
    the name ``lowercase``, shadowing one another.

    Args:
        p_val: An odd prime > 3.

    Returns:
        A candidate primitive root g with 3 <= g < p_val.
    """
    print('''Generating primitive root of p''')
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g
def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal public/private key pair of ``key_size`` bits.

    Renamed from ``lowercase``: ``make_key_files`` below calls it as
    ``generate_key``.

    Returns:
        ``(public_key, private_key)`` where ``public_key`` is
        ``(key_size, g, g^d mod-inverse component, p)`` and ``private_key`` is
        ``(key_size, d)``.
    """
    print('''Generating prime p...''')
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_a = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_b = cryptomath.find_mod_inverse(pow(e_a, d, p), p)
    public_key = (key_size, e_a, e_b, p)
    private_key = (key_size, d)
    return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    """Generate a key pair and write ``{name}_pubkey.txt`` / ``{name}_privkey.txt``.

    Refuses to overwrite existing key files and exits instead. Renamed from
    ``lowercase``: ``main`` below calls it as ``make_key_files``.

    Args:
        name: Basename prefix for the two output files.
        key_size: Key size in bits, forwarded to ``generate_key``.
    """
    if os.path.exists(f"""{name}_pubkey.txt""") or os.path.exists(f"""{name}_privkey.txt"""):
        print('''\nWARNING:''')
        print(
            f"""\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n"""
            '''Use a different name or delete these files and re-run this program.'''
        )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(f"""\nWriting public key to file {name}_pubkey.txt...""")
    with open(f"""{name}_pubkey.txt""", '''w''') as fo:
        fo.write(f"""{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}""")
    print(f"""Writing private key to file {name}_privkey.txt...""")
    with open(f"""{name}_privkey.txt""", '''w''') as fo:
        fo.write(f"""{private_key[0]},{private_key[1]}""")
def main() -> None:
    """Script entry point: generate an ElGamal key pair and write it to disk.

    Renamed from ``lowercase``: the ``if __name__ == "__main__"`` guard below
    calls ``main()``, which was previously undefined.
    """
    print('''Making key files...''')
    make_key_files('''elgamal''', 2_048)
    print('''Key files generation successful''')
if __name__ == "__main__":
main()
| 229 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : List[Any] = logging.get_logger(__name__)

# NOTE(review): this rebinding of `UpperCAmelCase_` immediately discards the
# logger created on the previous line. The two constants were presumably
# `logger` and `LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP` before machine renaming —
# confirm and restore distinct names.
UpperCAmelCase_ : Union[str, Any] = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}
class LxmertConfig(PretrainedConfig):
    """Configuration for LXMERT models.

    The previous definition declared one shared name for every ``__init__``
    parameter (a SyntaxError) while the body already referenced the real
    names (``vocab_size``, ``l_layers``, ...); the signature restored here is
    pinned by those assignments and the visible default values.

    Args:
        l_layers / x_layers / r_layers: Depths of the language, cross-modality
            and vision encoder stacks.
        visual_feat_dim / visual_pos_dim: Dimensions of the visual features and
            their normalised bounding-box positions.
        task_*: Which pre-training objectives are enabled.
        visual_*_loss: Which visual prediction losses are enabled.
        (Remaining arguments are the standard BERT-style hyper-parameters.)
    """

    model_type = '''lxmert'''
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # LXMERT has three encoder stacks; expose their depths under the
        # standard `num_hidden_layers` key.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
| 570 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# The feature extractor below calls `logger.warning(...)`, so this module-level
# logger must carry that name (it was previously bound to `_lowercase`,
# leaving `logger` undefined at the call site).
logger = logging.get_logger(__name__)
class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    """M-CTC-T feature extractor: log mel-frequency spectral coefficients (MFSC).

    The previous definition had machine-mangled identifiers: duplicate
    parameter names in ``__init__``/``_normalize_one``/``__call__`` (a
    SyntaxError), method names that did not match the internal call sites
    (``self._extract_mfsc_features``, ``self._normalize_one``,
    ``self.normalize``), an undefined base class, and mangled numpy dtype
    names (``np.floataa`` / ``np.intaa``). This restores the pinned names.

    Args:
        feature_size: Number of mel filters / feature dimension (default 80).
        sampling_rate: Expected audio sampling rate in Hz (default 16000).
        padding_value: Value used to pad feature rows.
        hop_length / win_length: Frame stride / length in milliseconds.
        win_function: Window name passed to ``window_function``.
        frame_signal_scale: Gain applied to the waveform before framing.
        preemphasis_coeff: Pre-emphasis filter coefficient.
        mel_floor: Lower bound applied to mel-filterbank energies.
        normalize_means / normalize_vars: Per-feature mean/variance
            normalisation over the unpadded length.
        return_attention_mask: Whether ``__call__`` returns an attention mask
            by default.
    """

    model_input_names = ['input_features', 'attention_mask']

    def __init__(
        self,
        feature_size=80,
        sampling_rate=1_60_00,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask
        # Frame geometry in samples, derived from the millisecond settings.
        self.sample_size = win_length * sampling_rate // 10_00
        self.sample_stride = hop_length * sampling_rate // 10_00
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array) -> np.ndarray:
        """Extract MFSC features for one unbatched waveform; returns (frames, feature_size)."""
        if self.win_function == "hamming_window":
            # The Hamming window here is the periodic=False (symmetric) variant.
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalise one feature matrix over its unpadded rows."""
        # Statistics are computed over the real (unpadded) frames only.
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)
        if input_length < x.shape[0]:
            # Re-apply the padding value to the rows past the real length.
            x[input_length:] = padding_value
        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Normalise each feature matrix; unpadded lengths come from the mask if given."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        """Featurise one waveform or a batch of waveforms into padded MFSC features."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
                    f''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
                    f''' {self.sampling_rate} and not {sampling_rate}.'''
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # Only trust the mask for lengths when padding was actually applied.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 659 | 0 |
'''simple docstring'''
import random
def partition(a: list, left_index: int, right_index: int) -> int:
    """Lomuto-style partition of ``a[left_index:right_index]`` around ``a[left_index]``.

    Renamed from ``_lowerCamelCase``: ``quick_sort_random`` below calls it as
    ``partition``; the swap assignment targets lost in the previous version are
    restored.

    Returns:
        The final index of the pivot element.
    """
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    # Put the pivot between the smaller and not-smaller partitions.
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1
def _lowerCamelCase ( A_ : Tuple , A_ : Optional[Any] , A_ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if left < right:
UpperCamelCase__ : str =random.randint(snake_case__ , right - 1 )
UpperCamelCase__ , UpperCamelCase__ : Optional[int] =(
a[left],
a[pivot],
) # switches the pivot with the left most bound
UpperCamelCase__ : List[str] =partition(snake_case__ , snake_case__ , snake_case__ )
quick_sort_random(
snake_case__ , snake_case__ , snake_case__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
snake_case__ , pivot_index + 1 , snake_case__ ) # recursive quicksort to the right of the pivot point
def _lowerCamelCase ( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ : Dict =input("Enter numbers separated by a comma:\n" ).strip()
UpperCamelCase__ : Optional[Any] =[int(snake_case__ ) for item in user_input.split("," )]
quick_sort_random(snake_case__ , 0 , len(snake_case__ ) )
print(snake_case__ )
if __name__ == "__main__":
main()
| 717 |
from math import factorial
# Cached factorial of each decimal digit, keyed by the digit as a string
# (the function bodies below look the sums up through this name).
DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of ``number``.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` is negative.
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Project Euler 74: count starting numbers below ``number_limit`` whose
    digit-factorial chain contains exactly ``chain_length`` non-repeating terms.

    Raises:
        TypeError: if either parameter is not an int.
        ValueError: if either parameter is not positive.
    """
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        # Reuse the cached tail length when the chain hit a known element.
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution()}")
| 582 | 0 |
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def _lowerCAmelCase(sentence: str) -> str:
    """Return ``sentence`` with its first character upper-cased.

    Non-letter first characters are left untouched; the empty string maps to "".
    (The original zipped the function name with itself; the ``string`` imports
    above are the intended arguments.)
    """
    if not sentence:
        return ""
    # Map each lowercase ASCII letter onto its uppercase partner.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 502 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
# Module logger (scrambled name).
A__ = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json URL for NAT models.
# NOTE(review): this rebinds the same name ``A__`` used for the logger above,
# silently clobbering it — the two bindings should have distinct names.
A__ = {
    '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
    # See all Nat models at https://huggingface.co/models?filter=nat
}
class a ( __lowerCamelCase , __lowerCamelCase ):
    """Configuration for a NAT (Neighborhood Attention Transformer) model.

    Every constructor argument is stored as an attribute so the
    ``PretrainedConfig`` machinery can serialize the config; the original
    scrambled version bound them to throwaway locals (and gave every parameter
    the same name, a SyntaxError) while still reading ``self.stage_names`` at
    the end.
    """

    # Serialized model identifier (written to config.json as "model_type").
    model_type = "nat"

    # Map the standard transformer attribute names onto this config's names.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 252 | 0 |
def lowercase(density: float, bulk_modulus: float) -> float:
    """Return the speed of sound in a fluid: sqrt(bulk_modulus / density).

    Raises:
        ValueError: if ``density`` or ``bulk_modulus`` is not positive.
    """
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 198 |
def solution(n: int = 1_000) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits (PE 25).

    The original counted the digits of the *argument* instead of the current
    Fibonacci term, and compared against the undefined name ``n``; its main
    guard also called ``solution``, which grounds the restored names.
    """
    fib_prev, fib_curr = 1, 1
    index = 2
    while True:
        digit_count = 0
        fib_next = fib_prev + fib_curr
        fib_prev, fib_curr = fib_curr, fib_next
        index += 1
        # Count the digits of the newly produced term.
        for _ in str(fib_next):
            digit_count += 1
        if digit_count == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 198 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazily-exposed public API of the ByT5 tokenization package.
# The lazy-module call below references ``_import_structure``, so the mapping
# must be bound to that name (the scrambled version left it undefined).
_import_structure = {'tokenization_byt5': ['ByT5Tokenizer']}

if TYPE_CHECKING:
    from .tokenization_byta import ByTaTokenizer
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first
    # attribute access (the otherwise-unused ``import sys`` grounds this).
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 99 |
'''simple docstring'''
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute ``(a ** n) % mod`` via recursive squaring, O(log n) multiplies.

    The scrambled def gave all three parameters one name (a SyntaxError) while
    the body and the demo below reference ``a``/``n``/``mod`` and
    ``binary_exponentiation``; ``n // 2`` replaces the original float ``n / 2``.
    """
    if n == 0:
        return 1

    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod

    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod


# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)

print((a / b) % p == (a * b ** (p - 2)) % p)
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class lowerCAmelCase_:
    """A binary-tree node carrying a number of coins (``data``)."""

    data: int = 42
    left: "lowerCAmelCase_ | None" = None
    right: "lowerCAmelCase_ | None" = None


# Result of distributing coins inside one subtree: ``moves`` already spent and
# the subtree's coin ``excess``.  The function body below uses the
# ``CoinsDistribResult`` name, so both bindings are kept.
_SCREAMING_SNAKE_CASE = namedtuple("CoinsDistribResult", "moves excess")
CoinsDistribResult = _SCREAMING_SNAKE_CASE


def __a(root):
    """Return the minimum number of moves so every node holds exactly one coin.

    Raises:
        ValueError: if the total coin count differs from the node count.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node) -> int:
        if node is None:
            return 0

        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node) -> int:
        if node is None:
            return 0

        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        # Coins that must flow across the edges to/from each child.
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_to_move, excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 712 |
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """Return the median of the two (unsorted) arrays taken together.

    The scrambled def duplicated its parameter names (a SyntaxError) while the
    main guard below calls ``median_of_two_arrays`` — those references ground
    the restored names.
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 489 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (scrambled name).
lowercase_ = logging.get_logger(__name__)

# Checkpoint name -> hosted config.json URL for MobileNetV2 models.
# NOTE(review): this rebinds the same name ``lowercase_`` used for the logger
# above, silently clobbering it — the two bindings should have distinct names.
lowercase_ = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class __UpperCamelCase ( lowerCAmelCase__ ):
    """Configuration for a MobileNetV2 model.

    All constructor arguments are stored as attributes so the
    ``PretrainedConfig`` base class can round-trip them through config.json;
    the scrambled version gave every parameter one name (a SyntaxError) and
    discarded the values into throwaway locals.
    """

    # Serialized model identifier (config.json "model_type").
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __UpperCamelCase ( lowerCAmelCase__ ):
    """ONNX export configuration for MobileNetV2.

    The scrambled version named all three properties identically so only the
    last survived; the ``OnnxConfig`` API expects ``inputs``/``outputs``/
    ``atol_for_validation``.
    """

    # Minimum torch version whose ONNX exporter supports this architecture.
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self):
        """Model input names and their dynamic axes (only batch is dynamic)."""
        return OrderedDict([('''pixel_values''', {0: '''batch'''})])

    @property
    def outputs(self):
        """Model output names/dynamic axes, depending on the export task."""
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})])
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])

    @property
    def atol_for_validation(self):
        """Absolute tolerance used when validating the exported model."""
        return 1e-4
| 74 |
"""simple docstring"""
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def _lowerCAmelCase(*objects):
    """Release references: set each passed object to ``None``, run GC and empty
    the accelerator cache (XPU/NPU if present, otherwise CUDA).

    Returns the list of ``None`` placeholders so callers can rebind their
    locals, e.g. ``a, b = release_memory(a, b)``.  (The scrambled version
    called ``isinstance(x, x)``, which raises TypeError.)
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    """Return True when ``exception`` is an out-of-memory style error that is
    worth retrying at a smaller batch size.

    The def is renamed to match its call site in the batch-size-search
    decorator below; the scrambled version tested ``isinstance(x, x)``.
    """
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    # Torch raises these as single-argument RuntimeErrors.
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def _lowerCAmelCase(function=None, starting_batch_size=128):
    """Decorator that retries ``function`` with a halved batch size after OOM.

    ``function`` must accept ``batch_size`` as its first positional argument;
    the wrapper supplies it, starting from ``starting_batch_size`` and halving
    after every OOM-style failure until success or zero.  (The scrambled def
    duplicated its parameter names — a SyntaxError — and the partial fallback
    recursed into ``None``.)
    """
    # Support both @decorator and @decorator(starting_batch_size=N) usage.
    if function is None:
        return functools.partial(_lowerCAmelCase, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`")
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                # Only OOM-style failures trigger a retry at a smaller size.
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
| 673 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a_ ( unittest.TestCase ):
    """Helper that builds small RoBERTa configs and dummy inputs for the Flax tests.

    NOTE(review): every constructor parameter shares the scrambled name
    ``_SCREAMING_SNAKE_CASE`` (a duplicate-argument SyntaxError) and every
    local is bound to ``UpperCamelCase``, so later assignments clobber earlier
    ones and the ``return`` statements reference names that are never defined
    (``config``, ``input_ids``, …).  The methods also call
    ``self.prepare_config_and_inputs`` although all methods here are named
    ``A__``.  The original identifiers must be restored before this can run.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=4 , ) -> Union[str, Any]:
        """Store the test hyper-parameters (batch size, model dims, dropout, …)."""
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = seq_length
        UpperCamelCase = is_training
        UpperCamelCase = use_attention_mask
        UpperCamelCase = use_token_type_ids
        UpperCamelCase = use_labels
        UpperCamelCase = vocab_size
        UpperCamelCase = hidden_size
        UpperCamelCase = num_hidden_layers
        UpperCamelCase = num_attention_heads
        UpperCamelCase = intermediate_size
        UpperCamelCase = hidden_act
        UpperCamelCase = hidden_dropout_prob
        UpperCamelCase = attention_probs_dropout_prob
        UpperCamelCase = max_position_embeddings
        UpperCamelCase = type_vocab_size
        UpperCamelCase = type_sequence_label_size
        UpperCamelCase = initializer_range
        UpperCamelCase = num_choices

    def A__ ( self ) -> Union[str, Any]:
        """Build a RobertaConfig plus dummy input ids / masks / token type ids."""
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        UpperCamelCase = None
        if self.use_attention_mask:
            UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] )

        UpperCamelCase = None
        if self.use_token_type_ids:
            UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        UpperCamelCase = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )

        return config, input_ids, token_type_ids, attention_mask

    def A__ ( self ) -> str:
        """Return (config, inputs_dict) for the common model tests."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase = config_and_inputs
        UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def A__ ( self ) -> List[str]:
        """Return decoder-style inputs including encoder hidden states and mask."""
        UpperCamelCase = self.prepare_config_and_inputs()
        UpperCamelCase = config_and_inputs
        UpperCamelCase = True
        UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a_ ( __snake_case , unittest.TestCase ):
    """Smoke tests for the Flax RoBERTa model family (pretrained load + forward).

    NOTE(review): this re-uses the class name ``a_`` of the tester above and
    inherits from the undefined name ``__snake_case`` (presumably the common
    Flax model-tester mixin); the two class attributes below also share one
    name ``lowercase`` so the tuple of model classes clobbers the boolean.
    ``FlaxRobertaModelTester`` referenced in ``A__`` is likewise undefined
    here (the tester class was renamed).  Original names must be restored.
    """

    lowercase = True

    lowercase = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def A__ ( self ) -> Dict:
        """Instantiate the helper tester used by the inherited common tests."""
        UpperCamelCase = FlaxRobertaModelTester(self )

    @slow
    def A__ ( self ) -> int:
        """Load each model class from the roberta-base PyTorch checkpoint and run a 1x1 forward."""
        for model_class_name in self.all_model_classes:
            UpperCamelCase = model_class_name.from_pretrained("""roberta-base""" , from_pt=_lowercase )
            UpperCamelCase = model(np.ones((1, 1) ) )
            self.assertIsNotNone(_lowercase )
| 718 |
'''simple docstring'''
def lowercase__ ( __UpperCamelCase , __UpperCamelCase )-> str:
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not number >= 1:
raise ValueError(
"""starting number must be
and integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
UpperCamelCase = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__UpperCamelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 0 |
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class __magic_name__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    """FiLM-conditioned T5-style decoder stack that maps noisy continuous
    (spectrogram) inputs, encoder outputs and a diffusion timestep to a
    predicted spectrogram.

    NOTE(review): every ``__init__`` parameter is bound to the same scrambled
    name ``snake_case_`` (a duplicate-argument SyntaxError) and each sub-module
    is assigned to the throwaway local ``lowercase`` instead of a ``self``
    attribute, while the methods below read ``self.decoders``,
    ``self.conditioning_emb``, ``self.position_encoding`` etc.  The original
    parameter/attribute names must be restored before this class can run.
    """

    @register_to_config
    def __init__( self , snake_case_ = 1_28 , snake_case_ = 2_56 , snake_case_ = 20_00.0 , snake_case_ = 7_68 , snake_case_ = 12 , snake_case_ = 12 , snake_case_ = 64 , snake_case_ = 20_48 , snake_case_ = 0.1 , ):
        super().__init__()

        # Timestep embedding -> MLP producing the FiLM conditioning vector.
        lowercase =nn.Sequential(
            nn.Linear(snake_case_ , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=snake_case_ ) , nn.SiLU() , )

        lowercase =nn.Embedding(snake_case_ , snake_case_ )
        lowercase =False

        lowercase =nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )

        lowercase =nn.Dropout(p=snake_case_ )

        lowercase =nn.ModuleList()
        for lyr_num in range(snake_case_ ):
            # FiLM conditional T5 decoder
            lowercase =DecoderLayer(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
            self.decoders.append(snake_case_ )

        lowercase =TaLayerNorm(snake_case_ )

        lowercase =nn.Dropout(p=snake_case_ )

        lowercase =nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )

    def _A( self , snake_case_ , snake_case_ ):
        # Outer-product of a query mask and a key mask, broadcast into an
        # attention-shaped mask with an extra head dimension.
        lowercase =torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
        return mask.unsqueeze(-3 )

    def _A( self , snake_case_ , snake_case_ , snake_case_ ):
        lowercase , lowercase , lowercase =decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        lowercase =get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )

        lowercase =self.conditioning_emb(snake_case_ ).unsqueeze(1 )

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        lowercase =decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        lowercase =torch.broadcast_to(
            torch.arange(snake_case_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )

        lowercase =self.position_encoding(snake_case_ )

        lowercase =self.continuous_inputs_projection(snake_case_ )
        inputs += position_encodings
        lowercase =self.dropout(snake_case_ )

        # decoder: No padding present.
        lowercase =torch.ones(
            decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )

        # Translate encoding masks to encoder-decoder masks.
        lowercase =[(x, self.encoder_decoder_mask(snake_case_ , snake_case_ )) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        lowercase =torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
        lowercase =torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )

        for lyr in self.decoders:
            lowercase =lyr(
                snake_case_ , conditioning_emb=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )[0]

        lowercase =self.decoder_norm(snake_case_ )
        lowercase =self.post_dropout(snake_case_ )

        lowercase =self.spec_out(snake_case_ )
        return spec_out
class __magic_name__ ( nn.Module ):
    """One decoder block: FiLM-conditioned self-attention, cross-attention
    over the encoder states, then a FiLM-conditioned feed-forward layer.

    NOTE(review): duplicate ``snake_case_`` parameters (a SyntaxError) and the
    module list bound to the local ``lowercase`` instead of ``self.layer`` —
    the original identifiers must be restored before this class can run.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_=1E-6 ):
        super().__init__()
        lowercase =nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ ) )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=snake_case_ , d_kv=snake_case_ , num_heads=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ , ) )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ , layer_norm_epsilon=snake_case_ ) )

    def _A( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ):
        # layer[0]: conditioned self-attention.
        lowercase =self.layer[0](
            snake_case_ , conditioning_emb=snake_case_ , attention_mask=snake_case_ , )

        if encoder_hidden_states is not None:
            # Convert the 0/1 encoder mask into an additive large-negative bias.
            lowercase =torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
                encoder_hidden_states.dtype )

            lowercase =self.layer[1](
                snake_case_ , key_value_states=snake_case_ , attention_mask=snake_case_ , )

        # Apply Film Conditional Feed Forward layer
        lowercase =self.layer[-1](snake_case_ , snake_case_ )

        return (hidden_states,)
class __magic_name__ ( nn.Module ):
    """Self-attention sub-block with FiLM conditioning applied after layer norm.

    NOTE(review): duplicate ``snake_case_`` parameters (a SyntaxError) and the
    sub-modules bound to the local ``lowercase`` instead of
    ``self.layer_norm`` / ``self.FiLMLayer`` / ``self.attention`` /
    ``self.dropout`` (which the forward method reads) — restore the original
    names before this class can run.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        super().__init__()
        lowercase =TaLayerNorm(snake_case_ )
        lowercase =TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
        lowercase =Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
        lowercase =nn.Dropout(snake_case_ )

    def _A( self , snake_case_ , snake_case_=None , snake_case_=None , ):
        # pre_self_attention_layer_norm
        lowercase =self.layer_norm(snake_case_ )

        if conditioning_emb is not None:
            lowercase =self.FiLMLayer(snake_case_ , snake_case_ )

        # Self-attention block
        lowercase =self.attention(snake_case_ )

        lowercase =hidden_states + self.dropout(snake_case_ )

        return hidden_states
class __magic_name__ ( nn.Module ):
    """Cross-attention sub-block: layer norm, attention over encoder states,
    then a residual connection with dropout.

    NOTE(review): duplicate ``snake_case_`` parameters (a SyntaxError) and
    sub-modules bound to the local ``lowercase`` instead of
    ``self.attention`` / ``self.layer_norm`` / ``self.dropout`` — restore
    the original names before this class can run.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        super().__init__()
        lowercase =Attention(query_dim=snake_case_ , heads=snake_case_ , dim_head=snake_case_ , out_bias=snake_case_ , scale_qk=snake_case_ )
        lowercase =TaLayerNorm(snake_case_ , eps=snake_case_ )
        lowercase =nn.Dropout(snake_case_ )

    def _A( self , snake_case_ , snake_case_=None , snake_case_=None , ):
        lowercase =self.layer_norm(snake_case_ )
        lowercase =self.attention(
            snake_case_ , encoder_hidden_states=snake_case_ , attention_mask=attention_mask.squeeze(1 ) , )
        lowercase =hidden_states + self.dropout(snake_case_ )
        return layer_output
class __magic_name__ ( nn.Module ):
    """Feed-forward sub-block with optional FiLM conditioning before the MLP.

    NOTE(review): duplicate ``snake_case_`` parameters (a SyntaxError) and
    sub-modules bound to the local ``lowercase`` instead of
    ``self.DenseReluDense`` / ``self.film`` / ``self.layer_norm`` /
    ``self.dropout`` (which the forward method reads) — restore the original
    names before this class can run.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        super().__init__()
        lowercase =TaDenseGatedActDense(d_model=snake_case_ , d_ff=snake_case_ , dropout_rate=snake_case_ )
        lowercase =TaFiLMLayer(in_features=d_model * 4 , out_features=snake_case_ )
        lowercase =TaLayerNorm(snake_case_ , eps=snake_case_ )
        lowercase =nn.Dropout(snake_case_ )

    def _A( self , snake_case_ , snake_case_=None ):
        lowercase =self.layer_norm(snake_case_ )
        if conditioning_emb is not None:
            lowercase =self.film(snake_case_ , snake_case_ )

        lowercase =self.DenseReluDense(snake_case_ )
        lowercase =hidden_states + self.dropout(snake_case_ )
        return hidden_states
class __magic_name__ ( nn.Module ):
    """T5 gated-activation MLP: ``gelu(wi_0(x)) * wi_1(x)`` -> dropout -> ``wo``.

    NOTE(review): duplicate ``snake_case_`` parameters (a SyntaxError), the
    two distinct input projections collapsed into one name (``wi_a`` is used
    for both the gate and the linear branch), and all sub-modules bound to the
    local ``lowercase`` instead of ``self.wi_0`` / ``self.wi_1`` / ``self.wo``
    / ``self.dropout`` / ``self.act`` — restore the original names before this
    class can run.
    """

    def __init__( self , snake_case_ , snake_case_ , snake_case_ ):
        super().__init__()
        lowercase =nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
        lowercase =nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
        lowercase =nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
        lowercase =nn.Dropout(snake_case_ )
        lowercase =NewGELUActivation()

    def _A( self , snake_case_ ):
        lowercase =self.act(self.wi_a(snake_case_ ) )
        lowercase =self.wi_a(snake_case_ )
        lowercase =hidden_gelu * hidden_linear
        lowercase =self.dropout(snake_case_ )

        lowercase =self.wo(snake_case_ )
        return hidden_states
class __magic_name__(nn.Module):
    """T5-style RMS layer norm: scale-only, no mean subtraction and no bias.

    The scrambled version duplicated the ``__init__`` parameter names (a
    SyntaxError) and bound the weight/epsilon to throwaway locals while the
    forward pass reads ``self.weight`` / ``self.variance_epsilon``.
    """

    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        # Learnable per-channel scale, initialised to the identity.
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def _A(self, hidden_states):
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class __magic_name__(nn.Module):
    """Tanh approximation of the GELU activation (the BERT/T5 "gelu_new" form).

    The scrambled version multiplied by the *builtin* ``input`` instead of the
    method's argument, which raises a TypeError at runtime.
    """

    def _A(self, snake_case_):
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
        return 0.5 * snake_case_ * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (snake_case_ + 0.04_47_15 * torch.pow(snake_case_, 3.0))))
class __magic_name__(nn.Module):
    """FiLM conditioning layer: predict a per-channel (scale, shift) pair from
    a conditioning embedding and apply ``x * (1 + scale) + shift``.

    The scrambled version duplicated the parameter names (a SyntaxError) and
    bound the projection to a throwaway local while the forward pass reads
    ``self.scale_bias``.
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        # No bias, so a zero conditioning embedding is exactly the identity map.
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def _A(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
| 72 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("""TEST_SAGEMAKER""" ,"""False""" ) ) is not True ,reason="""Skipping test because should only be run when releasing minor transformers version""" ,)
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
    [
        {
            """framework""": """pytorch""",
            """script""": """run_glue.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
        },
        {
            """framework""": """tensorflow""",
            """script""": """run_tf.py""",
            """model_name_or_path""": """distilbert-base-cased""",
            """instance_type""": """ml.g4dn.xlarge""",
            """results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
        },
    ] )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Single-node SageMaker training smoke tests, parameterized per framework.

    NOTE(review): all four methods share the scrambled name ``snake_case__``
    (later defs shadow earlier ones, and the last method calls
    ``self.create_estimator`` which no longer exists); results are bound to
    the throwaway name ``__magic_name__`` while later lines read ``estimator``
    / ``result_metrics_df`` / ``eval_accuracy`` / ``eval_loss`` /
    ``train_runtime``; ``check=a__`` and the final ``json.dump(... , a__)``
    reference undefined names.  Original identifiers must be restored before
    these tests can run.
    """

    def snake_case__ ( self : int ):
        # setUp: copy the example training script into the test dir (PyTorch only).
        if self.framework == "pytorch":
            subprocess.run(
                F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding='''utf-8''' , check=a__ , )
        assert hasattr(self , '''env''' )

    def snake_case__ ( self : str , a__ : int=1 ):
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-single''' , instance_count=a__ , instance_type=self.instance_type , debugger_hook_config=a__ , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def snake_case__ ( self : Optional[int] , a__ : Tuple ):
        # Export the job's CloudWatch metrics to a CSV for PR reporting.
        TrainingJobAnalytics(a__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )

    def snake_case__ ( self : Any ):
        # create estimator
        __magic_name__ = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        __magic_name__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )

        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        __magic_name__ = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_9999 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(F'''{estimator.latest_training_job.name}.json''' , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , a__ )
| 432 | 0 |
'''simple docstring'''
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def gen_gaussian_kernel(k_size, sigma):
    """Return a k_size x k_size Gaussian kernel with standard deviation sigma.

    NOTE(review): the normalization uses 1/(2*pi*sigma) rather than the usual
    1/(2*pi*sigma**2); kept as-is to preserve the script's existing output.
    """
    center = k_size // 2
    # coordinate grids centered on the kernel middle, e.g. -1..1 for k_size=3
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur with a k_size x k_size kernel to a 2-D grayscale image.

    Returns a uint8 image of shape (H - k_size + 1, W - k_size + 1) (valid
    convolution, no padding).
    """
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col: turn each k_size*k_size window into one row
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape (k*k,)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # one matrix product filters every window at once; cast back to 8-bit pixels
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype("uint8")
    return dst
if __name__ == "__main__":
# read original image
lowerCamelCase__ = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
lowerCamelCase__ = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
lowerCamelCase__ = gaussian_filter(gray, 3, sigma=1)
lowerCamelCase__ = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
| 712 |
"""Project Euler problem 48: last ten digits of the self-power series 1^1 + 2^2 + ... + 1000^1000."""


def solution():
    """Return the last ten digits of sum(i**i for i in 1..1000) as a string."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    # Python ints are arbitrary precision, so slicing the decimal string is exact
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    """Builds tiny random LayoutLMv3 configs/inputs and runs per-head model checks."""

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels)."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: coordinates must satisfy x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)
        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check hidden-state shapes for text+image, text-only and image-only inputs."""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check logits shape for the sequence-classification head."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check logits shape for the token-classification head."""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        """Check start/end logits shapes for the question-answering head."""
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape expected by the common model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LayoutLMvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test suite for LayoutLMv3."""

    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Pipeline tests are skipped for this model (they need processor support)
        return True

    def setUp(self):
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Adapt common inputs (and optionally labels) to the given model class."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # multiple choice expects inputs repeated along a num_choices dimension
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class LayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released microsoft/layoutlmv3-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False: the test supplies its own tokens/boxes
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )
        # verify the logits: 2 text tokens + 196 patches + CLS = 199 positions
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 53 |
"""Isolate the fractional part of a number, optionally rounded to a digit count."""


def decimal_isolate(number, digit_amount):
    """Return the decimal part of `number`.

    If `digit_amount` > 0 the result is rounded to that many digits; otherwise
    the raw floating-point remainder is returned (so binary representation
    artifacts may appear, e.g. 0.5299999... for 1.53).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 11 | 0 |
import operator as op
def solve(post_fix):
    """Evaluate a postfix (RPN) expression given as a list of tokens.

    Prints a step-by-step trace table and returns the integer result.
    Only non-negative integer literals are recognized (`str.isdigit`).
    """
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation
    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is an operand
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            # second operand is popped first
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )
    return int(stack[0])
if __name__ == "__main__":
_lowerCamelCase : int = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
print("\n\tResult = ", solve(Postfix))
| 718 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
_lowerCamelCase : Dict = pd.read_csv(
"https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
"position_salaries.csv"
)
_lowerCamelCase : Union[str, Any] = dataset.iloc[:, 1:2].values
_lowerCamelCase : Any = dataset.iloc[:, 2].values
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[int] = train_test_split(X, y, test_size=0.2, random_state=0)
_lowerCamelCase : Optional[Any] = PolynomialFeatures(degree=4)
_lowerCamelCase : Optional[Any] = poly_reg.fit_transform(X)
_lowerCamelCase : Dict = LinearRegression()
pol_reg.fit(X_poly, y)
def _UpperCAmelCase ():
'''simple docstring'''
plt.scatter(UpperCamelCase_ , UpperCamelCase_ , color="""red""" )
plt.plot(UpperCamelCase_ , pol_reg.predict(poly_reg.fit_transform(UpperCamelCase_ ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 196 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
__snake_case :List[Any] ='Create a default config file for Accelerate with only a few flags set.'
def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any]="no" , lowerCAmelCase__ : str = default_json_config_file , lowerCAmelCase__ : bool = False ) -> str:
'''simple docstring'''
A = Path(lowerCAmelCase__ )
path.parent.mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
A = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
A = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
A = torch.cuda.device_count()
A = num_gpus
A = False
if num_gpus > 1:
A = 'MULTI_GPU'
else:
A = 'NO'
elif is_xpu_available() and use_xpu:
A = torch.xpu.device_count()
A = num_xpus
A = False
if num_xpus > 1:
A = 'MULTI_XPU'
else:
A = 'NO'
elif is_npu_available():
A = torch.npu.device_count()
A = num_npus
A = False
if num_npus > 1:
A = 'MULTI_NPU'
else:
A = 'NO'
else:
A = 0
A = True
A = 1
A = 'NO'
A = ClusterConfig(**lowerCAmelCase__ )
config.to_json_file(lowerCAmelCase__ )
return path
def default_command_parser(parser, parents):
    """Register the `default` subcommand on `parser` and return the subparser."""
    parser = parser.add_parser('default', parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        '--config_file',
        default=None,
        help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ),
        dest='save_location',
    )
    parser.add_argument(
        '--mixed_precision',
        choices=['no', 'fp16', 'bf16'],
        type=str,
        help='Whether or not to use mixed precision training. '
        'Choose between FP16 and BF16 (bfloat16) training. '
        'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.',
        default='no',
    )
    # NOTE(review): dispatch target recovered from the config_command function below — confirm
    parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Entry point for `accelerate config default`: write the config and report its location."""
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    """Tiny linear -> batchnorm -> linear model used by the offload tests below."""

    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    """Unit tests for the accelerate weight-offloading utilities."""

    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        # exact prefixes only: "a.10" must not match "a.1"
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
| 299 | 0 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert an old-structure (XLM)ProphetNet checkpoint to the current layout.

    Loads the checkpoint with both the old and new model classes, copies every
    weight the new class reports as missing from the old model (translating
    attribute names via `mapping`), and saves the result.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    # attention projections stored as one fused in_proj tensor in the old model
    special_keys = ["key_proj", "value_proj", "query_proj"]
    # new attribute name -> old attribute name ("" means same container)
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        # walk the dotted path in parallel on both models
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # split the fused q/k/v projection into three separate layers
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                # numeric path components index into ModuleLists
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")
    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 525 | import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE : List[Any] = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class A_ :
    """Test helper that builds a tiny Pegasus config plus matching dummy inputs
    and checks that Flax cached (incremental) decoding matches a full forward
    pass.

    NOTE(review): automated renaming collapsed many distinct locals into
    `__a`, so several names used below (`input_ids`, `eos_tensor`,
    `decoder_input_ids`, `max_decoder_length`, `outputs_cache`, `outputs`,
    `diff`, ...) are never bound in this file — compare with the upstream
    FlaxPegasusModelTester before relying on this class.
    """

    # NOTE(review): the three class attributes below all share one name, so
    # only the last assignment ("gelu") survives; upstream these were
    # `config_cls`, `config_updates` and `hidden_act`.
    _SCREAMING_SNAKE_CASE = PegasusConfig
    _SCREAMING_SNAKE_CASE = {}
    _SCREAMING_SNAKE_CASE = """gelu"""

    def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any=13 , __SCREAMING_SNAKE_CASE : Any=7 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Optional[int]=99 , __SCREAMING_SNAKE_CASE : Optional[int]=32 , __SCREAMING_SNAKE_CASE : List[str]=5 , __SCREAMING_SNAKE_CASE : Tuple=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : str=0.1 , __SCREAMING_SNAKE_CASE : Dict=0.1 , __SCREAMING_SNAKE_CASE : Tuple=20 , __SCREAMING_SNAKE_CASE : List[str]=2 , __SCREAMING_SNAKE_CASE : Dict=1 , __SCREAMING_SNAKE_CASE : List[Any]=0 , ):
        # Store the hyper-parameters of the tiny test model.
        # NOTE(review): every assignment targets the same local `__a`; the
        # attribute names (self.parent, self.batch_size, ...) were lost.
        __a = parent
        __a = batch_size
        __a = seq_length
        __a = is_training
        __a = use_labels
        __a = vocab_size
        __a = hidden_size
        __a = num_hidden_layers
        __a = num_attention_heads
        __a = intermediate_size
        __a = hidden_dropout_prob
        __a = attention_probs_dropout_prob
        __a = max_position_embeddings
        __a = eos_token_id
        __a = pad_token_id
        __a = bos_token_id

    def _UpperCAmelCase ( self : Optional[int] ):
        # Build a random input batch ending in EOS, the matching tiny config,
        # and the input dict expected by the Flax Pegasus models.
        __a = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
        __a = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
        __a = np.concatenate([input_ids, eos_tensor] , axis=1 )
        __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        __a = prepare_pegasus_inputs_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        return config, inputs_dict

    def _UpperCAmelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ):
        # Check that decoding with an init_cache'd incremental cache produces
        # the same logits as a single full decode (max diff < 1e-3).
        __a = 20
        __a = model_class_name(__SCREAMING_SNAKE_CASE )
        __a = model.encode(inputs_dict["input_ids"] )
        __a , __a = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        __a = model.init_cache(decoder_input_ids.shape[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        __a = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
        # Position ids for all-but-last tokens, then a second decode step for
        # the final token reusing the cache.
        __a = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        __a = model.decode(
            decoder_input_ids[:, :-1] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
        __a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        __a = model.decode(
            decoder_input_ids[:, -1:] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
        __a = model.decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        # Compare only the first 5 logits of the last position for speed.
        __a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )

    def _UpperCAmelCase ( self : str , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ):
        # Same cache-consistency check as above, but with an explicit
        # decoder attention mask padded out to max_decoder_length.
        __a = 20
        __a = model_class_name(__SCREAMING_SNAKE_CASE )
        __a = model.encode(inputs_dict["input_ids"] )
        __a , __a = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        __a = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        __a = model.init_cache(decoder_input_ids.shape[0] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
        __a = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        __a = model.decode(
            decoder_input_ids[:, :-1] , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , past_key_values=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
        __a = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
        __a = model.decode(
            decoder_input_ids[:, -1:] , __SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__SCREAMING_SNAKE_CASE , decoder_position_ids=__SCREAMING_SNAKE_CASE , )
        __a = model.decode(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE )
        __a = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def __A ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ):
    """Build the input dict expected by the Flax Pegasus models.

    Fixes two defects in the previous version: all five parameters shared the
    name ``_A`` (a SyntaxError — duplicate argument names), and the dtype was
    the nonexistent ``np.inta`` (should be ``np.int8``).

    Args:
        config: model config; only ``config.pad_token_id`` is read.
        input_ids: encoder token ids, shape (batch, src_len).
        decoder_input_ids: decoder token ids, shape (batch, tgt_len).
        attention_mask: optional encoder mask; derived from padding if None.
        decoder_attention_mask: optional decoder mask; derived if None,
            with the first position always attended.

    Returns:
        dict with "input_ids", "decoder_input_ids", "attention_mask" and
        "decoder_attention_mask".
    """
    if attention_mask is None:
        # 1 wherever the token is not padding.
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        # First decoder position is always visible (decoder start token).
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class A_ ( a_ , unittest.TestCase ):
    """Flax Pegasus model tests: cache decoding, JIT encode/decode parity and
    slow integration (pretrained load + XSUM summarization).

    NOTE(review): automated renaming collapsed distinct names into `__a` and
    the seven class attributes below into one `_SCREAMING_SNAKE_CASE` (only
    the last assignment survives); upstream these were `all_model_classes`,
    `all_generative_model_classes`, `is_encoder_decoder`, `test_pruning`,
    `test_head_masking`, `test_onnx` — verify against the original file.
    """

    _SCREAMING_SNAKE_CASE = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    _SCREAMING_SNAKE_CASE = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    _SCREAMING_SNAKE_CASE = True
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False
    _SCREAMING_SNAKE_CASE = False

    def _UpperCAmelCase ( self : int ):
        # Create the shared model tester and config tester.
        __a = FlaxPegasusModelTester(self )
        __a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE )

    def _UpperCAmelCase ( self : Tuple ):
        # Run the generic config round-trip checks.
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self : Optional[int] ):
        # Cached decoding must match full decoding for every model class.
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def _UpperCAmelCase ( self : str ):
        # Same check, with an explicit decoder attention mask.
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

    def _UpperCAmelCase ( self : Dict ):
        # encode() must produce identical shapes with and without jax.jit.
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                __a = model_class(__SCREAMING_SNAKE_CASE )

                @jax.jit
                def encode_jitted(__SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple=None , **__SCREAMING_SNAKE_CASE : Optional[Any] ):
                    return model.encode(input_ids=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE )

                with self.subTest("JIT Enabled" ):
                    __a = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        __a = encode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
                self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
                for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def _UpperCAmelCase ( self : List[str] ):
        # decode() must produce identical shapes with and without jax.jit.
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __a = model_class(__SCREAMING_SNAKE_CASE )
                __a = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
                __a = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(__SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple ):
                    return model.decode(
                        decoder_input_ids=__SCREAMING_SNAKE_CASE , decoder_attention_mask=__SCREAMING_SNAKE_CASE , encoder_outputs=__SCREAMING_SNAKE_CASE , )

                with self.subTest("JIT Enabled" ):
                    __a = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        __a = decode_jitted(**__SCREAMING_SNAKE_CASE ).to_tuple()
                self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
                for jitted_output, output in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Smoke-test loading the pretrained PyTorch weights into Flax.
        # NOTE(review): `__SCREAMING_SNAKE_CASE` is unbound here (upstream:
        # from_pt=True).
        for model_class_name in self.all_model_classes:
            __a = model_class_name.from_pretrained("google/pegasus-large" , from_pt=__SCREAMING_SNAKE_CASE )
            __a = np.ones((1, 1) )
            __a = model(__SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(__SCREAMING_SNAKE_CASE )

    @slow
    def _UpperCAmelCase ( self : str ):
        # End-to-end summarization against fixed XSUM references.
        __a = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
        __a = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
        __a = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]
        __a = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]
        __a = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="np" , truncation=__SCREAMING_SNAKE_CASE , max_length=5_12 , padding=__SCREAMING_SNAKE_CASE )
        __a = model.generate(**__SCREAMING_SNAKE_CASE , num_beams=2 ).sequences
        __a = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE )
        assert tgt_text == decoded
| 525 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class _a ( UpperCamelCase__ ):
    """Tests for RealmRetriever: tokenizer/block-record fixtures, retrieval
    with answer span extraction, and save/load round-trips.

    NOTE(review): automated renaming collapsed the setUp attribute
    assignments into the local `lowercase__` — e.g. `self.tmpdirname`,
    `self.num_block_records` and `self.vocab_file` are never actually set —
    and boolean/flag arguments were replaced by the unbound name
    `UpperCamelCase_`. Compare with the upstream RealmRetrieverTest before
    relying on this class; the base class `UpperCamelCase__` is also
    undefined here (upstream: TestCase).
    """

    def lowerCamelCase_ ( self: Dict ) -> List[str]:
        """Create a temp dir, a tiny WordPiece vocab and the block-records dir."""
        lowercase__ = tempfile.mkdtemp()
        lowercase__ = 5
        # Realm tok
        lowercase__ = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowercase__ = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )
        lowercase__ = os.path.join(UpperCamelCase_ , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        lowercase__ = os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ )

    def lowerCamelCase_ ( self: str ) -> RealmTokenizer:
        """Load the tokenizer written by setUp."""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )

    def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
        """Remove the temp dir created by setUp."""
        shutil.rmtree(self.tmpdirname )

    def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
        """Build a RealmConfig sized to the dummy block records."""
        lowercase__ = RealmConfig(num_block_records=self.num_block_records )
        return config

    def lowerCamelCase_ ( self: str ) -> Optional[int]:
        """Build a tiny QA dataset fixture (unused by the retriever itself)."""
        lowercase__ = Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset

    def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
        """Dummy evidence blocks as a numpy array of byte strings."""
        lowercase__ = np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=UpperCamelCase_ , )
        return block_records

    def lowerCamelCase_ ( self: Optional[Any] ) -> int:
        """Retriever wired to the dummy blocks and tokenizer above."""
        lowercase__ = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever

    def lowerCamelCase_ ( self: int ) -> Dict:
        """Retrieve two blocks and check the concatenated question+block
        encodings (shapes and detokenized contents)."""
        lowercase__ = self.get_config()
        lowercase__ = self.get_dummy_retriever()
        lowercase__ = retriever.tokenizer
        lowercase__ = np.array([0, 3] , dtype='''long''' )
        lowercase__ = tokenizer(['''Test question'''] ).input_ids
        lowercase__ = tokenizer(
            ['''the fourth'''] , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ).input_ids
        lowercase__ = config.reader_seq_len
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = retriever(
            UpperCamelCase_ , UpperCamelCase_ , answer_ids=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''np''' )
        self.assertEqual(len(UpperCamelCase_ ) , 2 )
        self.assertEqual(len(UpperCamelCase_ ) , 2 )
        self.assertEqual(len(UpperCamelCase_ ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )

    def lowerCamelCase_ ( self: Tuple ) -> Union[str, Any]:
        """Check answer-span detection (has_answers / start / end positions)."""
        lowercase__ = self.get_config()
        lowercase__ = self.get_dummy_retriever()
        lowercase__ = retriever.tokenizer
        lowercase__ = np.array([0, 3, 5] , dtype='''long''' )
        lowercase__ = tokenizer(['''Test question'''] ).input_ids
        lowercase__ = tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCamelCase_ , return_token_type_ids=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , ).input_ids
        lowercase__ = config.reader_seq_len
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = retriever(
            UpperCamelCase_ , UpperCamelCase_ , answer_ids=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors='''np''' )
        self.assertEqual([False, True, True] , UpperCamelCase_ )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCamelCase_ )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCamelCase_ )

    def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
        """Round-trip the retriever via save_pretrained/from_pretrained,
        both from a local path and through a mocked hub download."""
        lowercase__ = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        lowercase__ = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            lowercase__ = os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            lowercase__ = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 43 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def _a ( SCREAMING_SNAKE_CASE ):
    """Freeze all parameters of the given ``torch.nn.Module``.

    Fixes two defects: the loop iterated over the unbound name ``module``
    (the parameter is named ``SCREAMING_SNAKE_CASE``), and the body assigned
    ``lowercase__ = False`` — a no-op that left every parameter trainable.
    The intended effect is ``param.requires_grad = False``.
    """
    for param in SCREAMING_SNAKE_CASE.parameters():
        param.requires_grad = False
def _a ( ):
    """Pick the best available torch device string: "mps" > "cuda" > "cpu".

    Fixes the previous version, which assigned the choice to the throwaway
    name ``lowercase__`` and then read the unbound name ``device``
    (NameError), and which never returned anything.
    """
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = '''mps'''
    if device == "mps":
        # Warn because MPS backpropagation has known silent-corruption issues.
        print(
            '''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
            ''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
            ''' with generations.''' )
    return device
def _a ( SCREAMING_SNAKE_CASE ):
    """Display an image array with matplotlib, hiding both axes.

    Fixes the previous version: the AxesImage was bound to the throwaway
    name ``lowercase__`` while ``fig`` was read unbound (NameError), and the
    image itself was passed as the ``set_visible`` flag instead of ``False``.
    """
    fig = plt.imshow(SCREAMING_SNAKE_CASE )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def _a ( ):
    """Return the current local time formatted as ``HH:MM:SS``.

    Fixes the previous version, which assigned ``datetime.now()`` to the
    throwaway name ``lowercase__`` and then read the unbound name
    ``current_time`` (NameError).
    """
    current_time = datetime.now()
    return current_time.strftime('''%H:%M:%S''' )
| 43 | 1 |
import math
import unittest
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int ) -> bool:
    """Return True iff ``__magic_name__`` is prime, via 6k±1 trial division.

    Fixes two defects: the guard was ``isinstance(x, x)`` (TypeError on every
    call — the second argument must be the type ``int``), and the body
    referenced the unbound name ``number`` instead of the parameter.
    Raises AssertionError for negative or non-int input.
    """
    assert isinstance(__magic_name__ , int ) and (
        __magic_name__ >= 0
    ), "'number' must been an int and positive"

    if 1 < __magic_name__ < 4:
        # 2 and 3 are primes
        return True
    elif __magic_name__ < 2 or __magic_name__ % 2 == 0 or __magic_name__ % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(__magic_name__ ) + 1 ) , 6 ):
        if __magic_name__ % i == 0 or __magic_name__ % (i + 2) == 0:
            return False
    return True
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Unit tests for the primality checker defined above.

    Fixes: the tests called the undefined name ``is_prime`` (the function in
    this file is ``SCREAMING_SNAKE_CASE_``), asserted against the unbound
    name ``__lowerCamelCase`` (the function raises ``AssertionError`` on
    negative input), and both test methods shared the name ``_A`` so the
    second silently shadowed the first.
    """

    def _A ( self : Union[str, Any] ):
        # Known small primes must be accepted.
        self.assertTrue(SCREAMING_SNAKE_CASE_(2 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(3 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(5 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(7 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(11 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(13 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(17 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(19 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(23 ) )
        self.assertTrue(SCREAMING_SNAKE_CASE_(29 ) )

    def _B ( self : Optional[Any] ):
        # Negative input violates the function's precondition assert.
        with self.assertRaises(AssertionError ):
            SCREAMING_SNAKE_CASE_(-19 )
        self.assertFalse(
            SCREAMING_SNAKE_CASE_(0 ) , """Zero doesn't have any positive factors, primes must have exactly two.""" , )
        self.assertFalse(
            SCREAMING_SNAKE_CASE_(1 ) , """One only has 1 positive factor, primes must have exactly two.""" , )
        self.assertFalse(SCREAMING_SNAKE_CASE_(2 * 2 ) )
        self.assertFalse(SCREAMING_SNAKE_CASE_(2 * 3 ) )
        self.assertFalse(SCREAMING_SNAKE_CASE_(3 * 3 ) )
        self.assertFalse(SCREAMING_SNAKE_CASE_(3 * 5 ) )
        self.assertFalse(SCREAMING_SNAKE_CASE_(3 * 5 * 7 ) )


if __name__ == "__main__":
    unittest.main()
| 716 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class _SCREAMING_SNAKE_CASE ( _a ):
    """Output container for the Flax ControlNet: per-resolution down-block
    residuals plus the mid-block residual.

    NOTE(review): both fields share the name ``snake_case__`` so only one
    survives; upstream these are ``down_block_res_samples`` and
    ``mid_block_res_sample``. ``jnp.ndarray`` annotations are kept verbatim.
    """
    snake_case__ : jnp.ndarray
    snake_case__ : jnp.ndarray
class _SCREAMING_SNAKE_CASE ( nn.Module ):
    """Small conv network that embeds the conditioning image into the same
    spatial resolution as the latents (silu-activated conv stack with
    stride-2 downsamples and a zero-initialized output conv).

    NOTE(review): the three dataclass fields below all share the name
    ``snake_case__`` (only the last survives), and ``setup`` binds every
    layer to the same local ``UpperCamelCase`` instead of the attributes
    (``self.conv_in``, ``self.blocks``, ``self.conv_out``) that ``__call__``
    reads — compare with the upstream FlaxControlNetConditioningEmbedding.
    ``jnp.floataa`` is presumably ``jnp.float32`` — TODO confirm.
    """
    snake_case__ : int
    snake_case__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)
    snake_case__ : jnp.dtype = jnp.floataa

    def _A ( self : Any ):
        # Input conv keeps spatial size (3x3, padding 1).
        UpperCamelCase :Union[str, Any] = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        UpperCamelCase :List[str] = []
        # For each channel step: one stride-1 conv, then one stride-2
        # downsampling conv into the next channel count.
        for i in range(len(self.block_out_channels ) - 1 ):
            UpperCamelCase :Optional[Any] = self.block_out_channels[i]
            UpperCamelCase :List[Any] = self.block_out_channels[i + 1]
            UpperCamelCase :List[Any] = nn.Conv(
                __lowerCamelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(__lowerCamelCase )
            UpperCamelCase :List[str] = nn.Conv(
                __lowerCamelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(__lowerCamelCase )
        UpperCamelCase :Tuple = blocks
        # Output conv is zero-initialized so the embedding starts as a no-op.
        UpperCamelCase :Optional[Any] = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self : Dict , __lowerCamelCase : Dict ):
        # conv_in -> silu -> (conv -> silu)* -> zero-init conv_out.
        UpperCamelCase :Tuple = self.conv_in(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = nn.silu(__lowerCamelCase )
        for block in self.blocks:
            UpperCamelCase :Tuple = block(__lowerCamelCase )
            UpperCamelCase :List[str] = nn.silu(__lowerCamelCase )
        UpperCamelCase :Dict = self.conv_out(__lowerCamelCase )
        return embedding
@flax_register_to_config
class _SCREAMING_SNAKE_CASE ( nn.Module , _a , _a ):
    """Flax ControlNet: a UNet encoder copy that consumes a conditioning
    image and emits zero-initialized residuals for each down block plus the
    mid block, scaled by ``conditioning_scale``.

    NOTE(review): every dataclass field below shares the name
    ``snake_case__`` (only the last survives) and ``setup`` binds all layers
    to the local ``UpperCamelCase`` instead of the attributes that
    ``__call__`` reads (``self.conv_in``, ``self.time_proj``,
    ``self.time_embedding``, ``self.controlnet_cond_embedding``,
    ``self.down_blocks``, ``self.controlnet_down_blocks``,
    ``self.mid_block``, ``self.controlnet_mid_block``). ``jnp.floataa`` /
    ``jnp.intaa`` are presumably ``jnp.float32`` / ``jnp.int32`` — compare
    with the upstream FlaxControlNetModel before relying on this class.
    """
    snake_case__ : int = 3_2
    snake_case__ : int = 4
    snake_case__ : Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    snake_case__ : Union[bool, Tuple[bool]] = False
    snake_case__ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
    snake_case__ : int = 2
    snake_case__ : Union[int, Tuple[int]] = 8
    snake_case__ : Optional[Union[int, Tuple[int]]] = None
    snake_case__ : int = 1_2_8_0
    snake_case__ : float = 0.0
    snake_case__ : bool = False
    snake_case__ : jnp.dtype = jnp.floataa
    snake_case__ : bool = True
    snake_case__ : int = 0
    snake_case__ : str = "rgb"
    snake_case__ : Tuple[int] = (1_6, 3_2, 9_6, 2_5_6)

    def _A ( self : int , __lowerCamelCase : jax.random.KeyArray ):
        """Initialize parameters by tracing one forward pass on zero inputs."""
        # init input tensors
        UpperCamelCase :int = (1, self.in_channels, self.sample_size, self.sample_size)
        UpperCamelCase :Union[str, Any] = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
        UpperCamelCase :int = jnp.ones((1,) , dtype=jnp.intaa )
        UpperCamelCase :Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
        # Conditioning image is 8x the latent resolution (VAE downsampling).
        UpperCamelCase :Tuple = (1, 3, self.sample_size * 8, self.sample_size * 8)
        UpperCamelCase :Tuple = jnp.zeros(__lowerCamelCase , dtype=jnp.floataa )
        UpperCamelCase , UpperCamelCase :int = jax.random.split(__lowerCamelCase )
        UpperCamelCase :Union[str, Any] = {"""params""": params_rng, """dropout""": dropout_rng}
        return self.init(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )["params"]

    def _A ( self : int ):
        """Build all submodules: input conv, time embedding, conditioning
        embedding, down blocks with zero-init residual convs, and mid block."""
        UpperCamelCase :Dict = self.block_out_channels
        UpperCamelCase :Tuple = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        UpperCamelCase :List[Any] = self.num_attention_heads or self.attention_head_dim
        # input
        UpperCamelCase :Optional[Any] = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        UpperCamelCase :Tuple = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        UpperCamelCase :Tuple = FlaxTimestepEmbedding(__lowerCamelCase , dtype=self.dtype )
        UpperCamelCase :List[Any] = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        # Broadcast scalar settings to one value per down block.
        UpperCamelCase :Union[str, Any] = self.only_cross_attention
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            UpperCamelCase :List[Any] = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(__lowerCamelCase , __lowerCamelCase ):
            UpperCamelCase :str = (num_attention_heads,) * len(self.down_block_types )
        # down
        UpperCamelCase :int = []
        UpperCamelCase :str = []
        UpperCamelCase :str = block_out_channels[0]
        # Zero-init 1x1 conv per residual so ControlNet starts as identity.
        UpperCamelCase :Optional[Any] = nn.Conv(
            __lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(__lowerCamelCase )
        for i, down_block_type in enumerate(self.down_block_types ):
            UpperCamelCase :List[str] = output_channel
            UpperCamelCase :Optional[Any] = block_out_channels[i]
            UpperCamelCase :Tuple = i == len(__lowerCamelCase ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                UpperCamelCase :List[Any] = FlaxCrossAttnDownBlockaD(
                    in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                UpperCamelCase :List[Any] = FlaxDownBlockaD(
                    in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(__lowerCamelCase )
            # One zero-init residual conv per resnet layer, plus one for the
            # downsampler of every non-final block.
            for _ in range(self.layers_per_block ):
                UpperCamelCase :List[Any] = nn.Conv(
                    __lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(__lowerCamelCase )
            if not is_final_block:
                UpperCamelCase :str = nn.Conv(
                    __lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(__lowerCamelCase )
        UpperCamelCase :Optional[Any] = down_blocks
        UpperCamelCase :Optional[Any] = controlnet_down_blocks
        # mid
        UpperCamelCase :str = block_out_channels[-1]
        UpperCamelCase :Dict = FlaxUNetMidBlockaDCrossAttn(
            in_channels=__lowerCamelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        UpperCamelCase :List[str] = nn.Conv(
            __lowerCamelCase , kernel_size=(1, 1) , padding="""VALID""" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )

    def __call__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : float = 1.0 , __lowerCamelCase : bool = True , __lowerCamelCase : bool = False , ):
        """Run the ControlNet forward pass; returns the scaled down-block and
        mid-block residuals (as a tuple when return_dict is falsy)."""
        UpperCamelCase :Dict = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            # Flip channels so the network always sees RGB internally.
            UpperCamelCase :List[Any] = jnp.flip(__lowerCamelCase , axis=1 )
        # 1. time
        if not isinstance(__lowerCamelCase , jnp.ndarray ):
            UpperCamelCase :Any = jnp.array([timesteps] , dtype=jnp.intaa )
        elif isinstance(__lowerCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
            UpperCamelCase :Any = timesteps.astype(dtype=jnp.floataa )
            UpperCamelCase :Optional[Any] = jnp.expand_dims(__lowerCamelCase , 0 )
        UpperCamelCase :Optional[Any] = self.time_proj(__lowerCamelCase )
        UpperCamelCase :Any = self.time_embedding(__lowerCamelCase )
        # 2. pre-process
        # NCHW -> NHWC for flax convolutions.
        UpperCamelCase :int = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
        UpperCamelCase :Dict = self.conv_in(__lowerCamelCase )
        UpperCamelCase :Any = jnp.transpose(__lowerCamelCase , (0, 2, 3, 1) )
        UpperCamelCase :Optional[int] = self.controlnet_cond_embedding(__lowerCamelCase )
        sample += controlnet_cond
        # 3. down
        UpperCamelCase :int = (sample,)
        for down_block in self.down_blocks:
            if isinstance(__lowerCamelCase , __lowerCamelCase ):
                UpperCamelCase , UpperCamelCase :Optional[Any] = down_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
            else:
                UpperCamelCase , UpperCamelCase :Union[str, Any] = down_block(__lowerCamelCase , __lowerCamelCase , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        UpperCamelCase :List[str] = self.mid_block(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , deterministic=not train )
        # 5. contronet blocks
        # Pass each residual through its zero-initialized 1x1 conv.
        UpperCamelCase :str = ()
        for down_block_res_sample, controlnet_block in zip(__lowerCamelCase , self.controlnet_down_blocks ):
            UpperCamelCase :Any = controlnet_block(__lowerCamelCase )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        UpperCamelCase :Optional[Any] = controlnet_down_block_res_samples
        UpperCamelCase :str = self.controlnet_mid_block(__lowerCamelCase )
        # 6. scaling
        UpperCamelCase :str = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=__lowerCamelCase , mid_block_res_sample=__lowerCamelCase )
| 590 | 0 |
"""simple docstring"""
import argparse
import copy
def SCREAMING_SNAKE_CASE__(path):
    """
    Build a symmetric adjacency map for the TSP instance stored at ``path``.

    Each line of the file is expected to look like ``"node_a node_b distance"``.
    Returns a dict mapping every node to a list of ``[neighbour, distance]``
    pairs; distances are kept as strings exactly as read from the file.

    Fixes: the original mangled every local to ``snake_case_`` and then read
    the undefined name ``dict_of_neighbours`` — a guaranteed NameError.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            parts = line.split()
            # Register the edge in both directions so the map is symmetric.
            if parts[0] not in dict_of_neighbours:
                dict_of_neighbours[parts[0]] = [[parts[1], parts[2]]]
            else:
                dict_of_neighbours[parts[0]].append([parts[1], parts[2]])
            if parts[1] not in dict_of_neighbours:
                dict_of_neighbours[parts[1]] = [[parts[0], parts[2]]]
            else:
                dict_of_neighbours[parts[1]].append([parts[0], parts[2]])
    return dict_of_neighbours
def SCREAMING_SNAKE_CASE__(path, dict_of_neighbours):
    """
    Greedy nearest-neighbour construction of an initial closed tour.

    The start node is the first character of the file at ``path``.  Returns
    ``(first_solution, distance_of_first_solution)`` where the solution ends
    with the start node repeated (a closed tour).

    Fixes: the original signature repeated one parameter name (SyntaxError)
    and every local was mangled to ``snake_case_`` while the body read the
    undefined names ``start_node``/``visiting``/``best_node``.
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000  # sentinel "infinite" edge length
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    # The last loop iteration added the 10000 sentinel because every node was
    # already visited; swap it for the real edge back to the start node.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def SCREAMING_SNAKE_CASE__(solution, dict_of_neighbours):
    """
    Generate every neighbour of ``solution`` obtained by swapping two interior
    nodes.  Each candidate tour gets its total length appended as the last
    element, and the list is returned sorted by that length (ascending).

    Fixes: the original had duplicate parameter names (SyntaxError), mangled
    locals, and a sort lambda whose parameter name did not match the ``x`` it
    used in its body.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx_n = solution.index(n)
        for kn in solution[1:-1]:
            idx_kn = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx_n] = kn
            _tmp[idx_kn] = n

            # Sum the edge lengths along the candidate tour.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def SCREAMING_SNAKE_CASE__(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """
    Classic tabu search over the 2-swap neighbourhood.

    Starting from ``first_solution`` (with cost ``distance_of_first_solution``),
    repeatedly moves to the best non-tabu neighbour for ``iters`` iterations,
    keeping a FIFO tabu list bounded at ``size`` entries.  Returns
    ``(best_solution_ever, best_cost)``.

    Fixes: the original repeated one parameter name five times (SyntaxError)
    and mangled every local.  NOTE(review): this relies on a module-level
    ``find_neighborhood`` helper, which is currently mangled above — confirm
    the wiring before shipping.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            # The first position where the candidate differs from the current
            # solution identifies the pair of nodes the swap exchanged.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if (
                [first_exchange_node, second_exchange_node] not in tabu_list
                and [second_exchange_node, first_exchange_node] not in tabu_list
            ):
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended tour length
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu: fall back to the next best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def SCREAMING_SNAKE_CASE__(args=None):
    """
    Script entry point: build the neighbour map from ``args.File``, construct a
    greedy initial tour, run tabu search for ``args.Iterations`` iterations with
    a tabu list of ``args.Size``, and print the best tour found.

    Fixes: the original mangled the ``args`` parameter and the result names, so
    ``args``, ``best_sol`` and ``best_cost`` were all undefined at runtime.
    NOTE(review): the helper names below must match the definitions above
    (currently mangled to SCREAMING_SNAKE_CASE__) — confirm the wiring.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    # Fixes: the parser was bound to `a_` but every subsequent line used the
    # undefined name `parser`, so the script crashed immediately.
    parser = argparse.ArgumentParser(description="Tabu Search")
    parser.add_argument(
        "-f",
        "--File",
        type=str,
        help="Path to the file containing the data",
        required=True,
    )
    parser.add_argument(
        "-i",
        "--Iterations",
        type=int,
        help="How many iterations the algorithm should perform",
        required=True,
    )
    parser.add_argument(
        "-s", "--Size", type=int, help="Size of the tabu list", required=True
    )
    # Pass the arguments to main method
    # NOTE(review): `main` must name the entry-point function defined above
    # (currently mangled to SCREAMING_SNAKE_CASE__) — confirm before shipping.
    main(parser.parse_args())
| 480 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowercase ( _UpperCAmelCase):
    """
    Offline-mode integration tests: each test launches a fresh Python
    subprocess built from small source snippets, monkey-patches
    ``socket.socket`` there to simulate "no network", and checks that
    ``TRANSFORMERS_OFFLINE=1`` still lets models load from the local cache.

    NOTE(review): this block is identifier-mangled and non-functional as-is —
    the base class ``_UpperCAmelCase`` is undefined (presumably TestCasePlus —
    TODO confirm), every local is rebound to ``snake_case_`` (the snippet
    strings were presumably ``load``/``run``/``mock``, the env dict ``env``,
    the command ``cmd``, the result ``result``), the ``= """1"""`` lines were
    presumably ``env["TRANSFORMERS_OFFLINE"] = "1"``, and the boolean
    ``subprocess.run`` arguments were lost behind ``lowercase__``.  Flagged
    rather than guessed; restore against the upstream test before shipping.
    """
    @require_torch
    def __UpperCamelCase (self ):
        """Cached models must load with TRANSFORMERS_OFFLINE=1 and no network."""
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        snake_case_ : int = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        snake_case_ : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
        snake_case_ : Dict = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        snake_case_ : Tuple = """hf-internal-testing/tiny-random-bert"""
        BertConfig.from_pretrained(lowercase__ )
        BertModel.from_pretrained(lowercase__ )
        BertTokenizer.from_pretrained(lowercase__ )
        pipeline(task="""fill-mask""" , model=lowercase__ )
        # baseline - just load from_pretrained with normal network
        snake_case_ : Optional[Any] = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
        # should succeed
        snake_case_ : Tuple = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        snake_case_ : str = """1"""
        snake_case_ : List[Any] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
    @require_torch
    def __UpperCamelCase (self ):
        """Loading must survive a flaky network (socket errors) via the cache."""
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        snake_case_ : List[str] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
        snake_case_ : int = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
        snake_case_ : Optional[int] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
        # Force fetching the files so that we can use the cache
        snake_case_ : Optional[int] = """hf-internal-testing/tiny-random-bert"""
        BertConfig.from_pretrained(lowercase__ )
        BertModel.from_pretrained(lowercase__ )
        BertTokenizer.from_pretrained(lowercase__ )
        pipeline(task="""fill-mask""" , model=lowercase__ )
        # baseline - just load from_pretrained with normal network
        snake_case_ : int = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
        # should succeed
        snake_case_ : List[Any] = self.get_env()
        snake_case_ : Dict = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
    @require_torch
    def __UpperCamelCase (self ):
        """Sharded checkpoints must also load in offline mode."""
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        snake_case_ : Optional[int] = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
        snake_case_ : Dict = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
        snake_case_ : int = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        # baseline - just load from_pretrained with normal network
        snake_case_ : List[Any] = [sys.executable, """-c""", """\n""".join([load, run] )]
        # should succeed
        snake_case_ : List[str] = self.get_env()
        snake_case_ : str = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
        # next emulate no network
        snake_case_ : Any = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        snake_case_ : Optional[Any] = """1"""
        snake_case_ : int = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
    @require_torch
    def __UpperCamelCase (self ):
        """pipeline() without an explicit task must fail clearly in offline mode."""
        snake_case_ : str = """
from transformers import pipeline
"""
        snake_case_ : Dict = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
        snake_case_ : Dict = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
        snake_case_ : List[str] = self.get_env()
        snake_case_ : Dict = """1"""
        snake_case_ : int = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
        snake_case_ : Optional[int] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
    @require_torch
    def __UpperCamelCase (self ):
        """Models with trust_remote_code must load offline once cached."""
        snake_case_ : int = """
from transformers import AutoModel
"""
        snake_case_ : Optional[int] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
        # baseline - just load from_pretrained with normal network
        snake_case_ : Dict = [sys.executable, """-c""", """\n""".join([load, run] )]
        # should succeed
        snake_case_ : Optional[Any] = self.get_env()
        snake_case_ : List[str] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        snake_case_ : Any = """1"""
        snake_case_ : Dict = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn("""success""" , result.stdout.decode() )
| 480 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class A__(unittest.TestCase):
    """
    Configuration holder used by the GLPN image-processing tests: stores the
    image sizes and processor options, and produces the kwargs dict for
    instantiating the image processor.

    Fixes: the original ``__init__`` repeated the same parameter name nine
    times (a SyntaxError) and bound every attribute to ``__magic_name__``;
    the parameter names below are restored from the body's assignment order.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
  """
  Test suite for the GLPN image processor over PIL, NumPy and PyTorch inputs.

  NOTE(review): identifier-mangled and non-functional as-is — the mixin base
  ``__SCREAMING_SNAKE_CASE`` is undefined (presumably
  ImageProcessingSavingTestMixin — TODO confirm), the class attribute should
  presumably be ``image_processing_class``, the first method looks like a
  ``setUp`` that should assign ``self.image_processor_tester``, and the
  ``__magic_name__``/``lowerCamelCase`` locals hide the real targets of each
  assignment.  Flagged rather than guessed.
  """
  lowerCamelCase__ : Optional[int] =GLPNImageProcessor if is_vision_available() else None
  def lowercase ( self ) -> List[Any]:
    """setUp-style hook: build the tester fixture for this test case."""
    __magic_name__ : List[Any] = GLPNImageProcessingTester(self )
  @property
  def lowercase ( self ) -> Any:
    """kwargs dict used to instantiate the image processor under test."""
    return self.image_processor_tester.prepare_image_processor_dict()
  def lowercase ( self ) -> Dict:
    """The processor must expose its documented configuration attributes."""
    __magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(lowerCamelCase , '''do_resize''' ) )
    self.assertTrue(hasattr(lowerCamelCase , '''size_divisor''' ) )
    self.assertTrue(hasattr(lowerCamelCase , '''resample''' ) )
    self.assertTrue(hasattr(lowerCamelCase , '''do_rescale''' ) )
  def lowercase ( self ) -> Optional[int]:
    """Intentionally empty placeholder (inherited test not applicable)."""
    pass
  def lowercase ( self ) -> List[str]:
    """PIL inputs: output spatial dims must be multiples of size_divisor."""
    __magic_name__ : str = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    __magic_name__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
    for image in image_inputs:
      self.assertIsInstance(lowerCamelCase , Image.Image )
    # Test not batched input (GLPNImageProcessor doesn't support batching)
    __magic_name__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
    self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
    self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
  def lowercase ( self ) -> str:
    """NumPy inputs: output spatial dims must be multiples of size_divisor."""
    __magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    __magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
    for image in image_inputs:
      self.assertIsInstance(lowerCamelCase , np.ndarray )
    # Test not batched input (GLPNImageProcessor doesn't support batching)
    __magic_name__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
    self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
    self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
  def lowercase ( self ) -> Optional[int]:
    """Torch inputs: output spatial dims must be multiples of size_divisor."""
    __magic_name__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    __magic_name__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
    for image in image_inputs:
      self.assertIsInstance(lowerCamelCase , torch.Tensor )
    # Test not batched input (GLPNImageProcessor doesn't support batching)
    __magic_name__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
    self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
    self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
| 701 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.  Fix: the original bound this AND the archive map below
# to the same name (`lowercase_`), so the logger was clobbered immediately.
logger = logging.get_logger(__name__)
# Map of pretrained checkpoint names to their hosted config files.
# Fix: was bound to `lowercase_`, overwriting the logger defined above.
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class A__(PretrainedConfig):
    """
    Configuration for a Wav2Vec2-style speech model: feature-encoder conv
    stack, transformer encoder, SpecAugment masking, codevector quantization,
    CTC head, adapter layers and the XVector classification head.

    Fixes: the original ``__init__`` repeated the parameter name
    ``lowerCamelCase`` for every argument (a SyntaxError) and bound each
    attribute to ``__magic_name__`` instead of ``self.<attr>``.  Parameter
    names are restored from the body's assignment order; defaults are kept
    exactly as in the mangled signature.
    """

    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv descriptors must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total stride of the conv feature encoder (input frames per logit)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 336 | 0 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class a(TaskTemplate):
    """
    Task template describing a text-summarization dataset: a string ``text``
    input column mapped to a string ``summary`` label column.

    Fixes: the original decorated with ``frozen=__lowercase`` and inherited
    from ``__lowercase`` (both undefined names), and declared every field
    under the same duplicated identifier so only the last one survived.
    Field names are restored from the body's ``self.text_column`` /
    ``self.summary_column`` references and the imported ``TaskTemplate`` base.
    """

    # Fixed task identifier; kept in asdict() output even though it is the default.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the configured column names onto the canonical schema names."""
        return {self.text_column: "text", self.summary_column: "summary"}
| 81 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
# Module-level logger.  Fix: was bound to `__lowerCamelCase`, the same mangled
# name reused for every constant in this module.
logger = logging.get_logger(__name__)
# Canonical file names for the slow/fast vocabularies.
# Fix: renamed from the mangled `__lowerCamelCase`; the tokenizer class below
# references VOCAB_FILES_NAMES.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
# Hosted vocab/tokenizer files per checkpoint.
# Fix: renamed from the mangled `__lowerCamelCase`; the tokenizer class below
# references PRETRAINED_VOCAB_FILES_MAP.
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
        'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
        'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
        'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
        'bert-base-multilingual-uncased': (
            'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
        ),
        'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
        'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
        'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
        'bert-large-uncased-whole-word-masking': (
            'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
        ),
        'bert-large-cased-whole-word-masking': (
            'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
        ),
        'bert-large-uncased-whole-word-masking-finetuned-squad': (
            'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
        ),
        'bert-large-cased-whole-word-masking-finetuned-squad': (
            'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
        ),
        'bert-base-cased-finetuned-mrpc': (
            'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
        ),
        'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
        'bert-base-german-dbmdz-uncased': (
            'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
        ),
        'TurkuNLP/bert-base-finnish-cased-v1': (
            'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
        ),
        'TurkuNLP/bert-base-finnish-uncased-v1': (
            'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
        ),
        'wietsedv/bert-base-dutch-cased': (
            'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
        'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
        'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
        'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
        'bert-base-multilingual-uncased': (
            'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
        ),
        'bert-base-multilingual-cased': (
            'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
        ),
        'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
        'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
        'bert-large-uncased-whole-word-masking': (
            'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
        ),
        'bert-large-cased-whole-word-masking': (
            'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
        ),
        'bert-large-uncased-whole-word-masking-finetuned-squad': (
            'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
        ),
        'bert-large-cased-whole-word-masking-finetuned-squad': (
            'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
        ),
        'bert-base-cased-finetuned-mrpc': (
            'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
        ),
        'bert-base-german-dbmdz-cased': (
            'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
        ),
        'bert-base-german-dbmdz-uncased': (
            'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
        ),
        'TurkuNLP/bert-base-finnish-cased-v1': (
            'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
        ),
        'TurkuNLP/bert-base-finnish-uncased-v1': (
            'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
        ),
        'wietsedv/bert-base-dutch-cased': (
            'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
        ),
    },
}
# Maximum input length per checkpoint.
# Fix: renamed from the mangled `__lowerCamelCase`; the tokenizer class below
# references PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
    'bert-base-german-cased': 512,
    'bert-large-uncased-whole-word-masking': 512,
    'bert-large-cased-whole-word-masking': 512,
    'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
    'bert-large-cased-whole-word-masking-finetuned-squad': 512,
    'bert-base-cased-finetuned-mrpc': 512,
    'bert-base-german-dbmdz-cased': 512,
    'bert-base-german-dbmdz-uncased': 512,
    'TurkuNLP/bert-base-finnish-cased-v1': 512,
    'TurkuNLP/bert-base-finnish-uncased-v1': 512,
    'wietsedv/bert-base-dutch-cased': 512,
}
# Per-checkpoint init kwargs (casing behaviour).
# Fix: renamed from the mangled `__lowerCamelCase`; the tokenizer class below
# references PRETRAINED_INIT_CONFIGURATION.
PRETRAINED_INIT_CONFIGURATION = {
    'bert-base-uncased': {'do_lower_case': True},
    'bert-large-uncased': {'do_lower_case': True},
    'bert-base-cased': {'do_lower_case': False},
    'bert-large-cased': {'do_lower_case': False},
    'bert-base-multilingual-uncased': {'do_lower_case': True},
    'bert-base-multilingual-cased': {'do_lower_case': False},
    'bert-base-chinese': {'do_lower_case': False},
    'bert-base-german-cased': {'do_lower_case': False},
    'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
    'bert-large-cased-whole-word-masking': {'do_lower_case': False},
    'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
    'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
    'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
    'bert-base-german-dbmdz-cased': {'do_lower_case': False},
    'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
    'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
    'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
    'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class A__(PreTrainedTokenizerFast):
    r"""
    A "fast" BERT tokenizer backed by the HuggingFace *tokenizers* library:
    handles [CLS]/[SEP] special-token formatting, pair token-type ids and
    vocabulary saving, and keeps the backend normalizer in sync with the
    requested casing options.

    Fixes: the original inherited from the undefined name ``__lowercase``,
    repeated the parameter name ``__a`` for every ``__init__`` argument (a
    SyntaxError), declared all class attributes and methods under duplicated
    names so they shadowed each other, referenced the undefined
    ``token_ids_a`` for the second sequence, and dropped the writes into
    ``normalizer_state`` before rebuilding the normalizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # If the serialized backend normalizer disagrees with the requested
        # options, rebuild it with the requested values.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` (plus ``B [SEP]`` when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """0s for the first sequence (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend vocabulary files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 222 | 0 |
import math
def __UpperCamelCase(initial_intensity: float, angle: float) -> float:
    """
    Malus's law: intensity of polarized light after an analyzer at ``angle``
    degrees, i.e. ``initial_intensity * cos(angle)**2``.

    Raises ValueError for a negative intensity or an angle outside [0, 360].

    Fixes: the original declared two parameters both named ``_A`` (a
    SyntaxError) while the body used ``initial_intensity``/``angle``.
    """
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
    import doctest

    # Run this module's doctests when executed as a script.
    doctest.testmod(name='malus_law')
| 75 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='%(message)s')
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single column vector of shape (n, 1).

    Named per the call sites below (the scrambled name collided with the
    other functions in this module).
    """
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the within-class scatter (covariance) matrix.

    Args:
        features: (n_features, n_samples) data matrix.
        labels: per-sample integer class labels in [0, classes).
        classes: number of classes.

    Returns:
        Within-class covariance matrix, normalized by the total sample count.
    """
    # Original scrambled signature reused one name for all three parameters
    # (a SyntaxError); names restored from the call site in
    # linear_discriminant_analysis below.
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the between-class scatter (covariance) matrix.

    Each class contributes its sample count times the outer product of the
    deviation of the class mean from the overall mean.

    Args:
        features: (n_features, n_samples) data matrix.
        labels: per-sample integer class labels in [0, classes).
        classes: number of classes.

    Returns:
        Between-class covariance matrix, normalized by the total sample count.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project `features` onto its top `dimensions` principal components.

    Args:
        features: (n_features, n_samples) data matrix.
        dimensions: number of principal components to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: if the dataset is empty.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        # `force=True` replaces any pre-existing handler configuration
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project `features` onto `dimensions` linear discriminants.

    Solves the generalized eigenproblem S_b v = lambda S_w v and projects the
    data onto the leading discriminant directions.

    Args:
        features: (n_features, n_samples) data matrix.
        labels: per-sample integer class labels in [0, classes).
        classes: number of classes (must exceed `dimensions`).
        dimensions: number of discriminants to keep.

    Raises:
        AssertionError: if `classes <= dimensions` or the dataset is empty.
    """
    assert classes > dimensions
    # Check if features have been already loaded.
    # BUG FIX: the original tested `features.any` (the bound method, always
    # truthy) instead of calling it.
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA test: the expected mismatch must surface as an AssertionError."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 | 1 |
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclid's algorithm: greatest common divisor of x and y."""
    # Original scrambled code named both parameters identically (SyntaxError);
    # names restored from the recursive call site.
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple of x and y."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 540 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class A__(TestCase):
    """Tests for `FeaturesManager.determine_framework`.

    NOTE(review): the scrambled original inherited from an undefined name and
    gave every method the same name; base class and method names are restored
    from the in-file imports (`from unittest import TestCase`) and internal
    call sites (`self._setup_pt_ckpt`, `self._setup_tf_ckpt`).
    """

    def setUp(self) -> None:
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, model_path) -> None:
        """Save a PyTorch checkpoint of the test model under `model_path`."""
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(model_path)

    def _setup_tf_ckpt(self, model_path) -> None:
        """Save a TensorFlow checkpoint of the test model under `model_path`."""
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(model_path)

    def test_framework_provided(self):
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # PyTorch in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
| 540 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_torch_available,
)

# Lazy-import structure: maps submodule name -> public names it defines.
_import_structure = {
    "configuration_speecht5": [
        "SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
        "SpeechT5Config",
        "SpeechT5HifiGanConfig",
    ],
    "feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
    "processing_speecht5": ["SpeechT5Processor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: these optional entries were assigned to throwaway variables
    # instead of being registered in `_import_structure`.
    _import_structure["tokenization_speecht5"] = ["SpeechT5Tokenizer"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speecht5"] = [
        "SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SpeechT5ForSpeechToText",
        "SpeechT5ForSpeechToSpeech",
        "SpeechT5ForTextToSpeech",
        "SpeechT5Model",
        "SpeechT5PreTrainedModel",
        "SpeechT5HifiGan",
    ]

if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )

else:
    import sys

    # BUG FIX: the lazy module must be installed into sys.modules; the
    # scrambled original assigned it to an unused variable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 60 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure: maps submodule name -> public names it defines.
_import_structure = {"configuration_vit": ["VIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTConfig", "ViTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: these optional entries were assigned to throwaway variables
    # instead of being registered in `_import_structure`.
    _import_structure["feature_extraction_vit"] = ["ViTFeatureExtractor"]
    _import_structure["image_processing_vit"] = ["ViTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit"] = [
        "VIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTForImageClassification",
        "ViTForMaskedImageModeling",
        "ViTModel",
        "ViTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit"] = [
        "TFViTForImageClassification",
        "TFViTModel",
        "TFViTPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vit"] = [
        "FlaxViTForImageClassification",
        "FlaxViTModel",
        "FlaxViTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_vit import ViTFeatureExtractor
        from .image_processing_vit import ViTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit import (
            VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
            ViTModel,
            ViTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel

else:
    import sys

    # BUG FIX: install the lazy module into sys.modules instead of an unused variable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 60 | 1 |
def solution(n: int = 100) -> int:
    """Project Euler 6: (1 + 2 + ... + n)^2 - (1^2 + 2^2 + ... + n^2).

    The function name is restored from the `solution()` call in the main
    guard below (the scrambled name left it undefined there).
    """
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
| 479 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy-import structure: maps submodule name -> public names it defines.
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: register the torch-only entries in `_import_structure`
    # (the scrambled original assigned them to an unused variable).
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # BUG FIX: install the lazy module into sys.modules instead of an unused variable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 615 | 0 |
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) using `accuracy` terms of its Maclaurin series.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms; must be a positive int.

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    # The scrambled signature used one name for both parameters (SyntaxError);
    # names restored from the call sites in the main guard below.
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Range-reduce theta by multiples of 2*pi so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) using `accuracy` terms of its Maclaurin series.

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms; must be a positive int.

    Raises:
        ValueError: if `theta` is not numeric or `accuracy` is not a positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 105 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
lowercase : Optional[Any] = logging.getLogger(__name__)
class NER(TokenClassificationTask):
    """CoNLL-style named-entity-recognition task.

    NOTE(review): the scrambled original named all three task classes `__A`
    and inherited from an undefined name; the base class is restored from the
    in-file import `TokenClassificationTask` and method names from that
    interface. Reads one whitespace-separated token per line; `label_idx`
    selects which column holds the label (-1 = last).
    """

    def __init__(self, label_idx=-1):
        # column index of the label within each whitespace-split line
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode):
        """Read `{mode}.txt` from `data_dir` into a list of InputExample."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Write predictions next to each token of the test input."""
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path):
        """Load labels from `path`, or fall back to the default CoNLL-2003 set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class Chunk(NER):
    """Chunking task: same file format as NER but the label is the
    second-to-last column (label_idx=-2) and a chunk tag set is used."""

    def __init__(self):
        # label is in the second-to-last whitespace-separated column
        super().__init__(label_idx=-2)

    def get_labels(self, path):
        """Load labels from `path`, or fall back to the default chunk tag set."""
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class POS(TokenClassificationTask):
    """Part-of-speech tagging task over CoNLL-U files (parsed with `conllu.parse_incr`)."""

    def read_examples_from_file(self, data_dir, mode):
        """Read `{mode}.txt` (CoNLL-U) from `data_dir` into InputExamples of (form, upos) pairs."""
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer, test_input_reader, preds_list):
        """Write each token as `form (gold|pred)` per sentence line."""
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path):
        """Load labels from `path`, or fall back to the universal POS tag set."""
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 105 | 1 |
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__magic_name__ : str = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Set a (possibly quantized) parameter or buffer of `module` on `device`.

    If the target parameter is a bitsandbytes `Int8Params`/`Params4bit`, the new
    value is re-wrapped in the matching quantized parameter class before being
    moved to `device`. Function name restored from the in-file wrapper
    `set_module_8bit_tensor_to_device` below (the scrambled name collided with
    every other function in this module).

    Args:
        module: root module; `tensor_name` may be a dotted path below it.
        tensor_name: name of the parameter/buffer to set.
        device: target device.
        value: optional new value; if None, the existing value is moved.
        fp16_statistics: optional SCB statistics attached for int8 checkpoints.
    """
    # Resolve dotted names down to the owning submodule.
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    # int8 checkpoints only round-trip with recent bitsandbytes releases
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            # NOTE(review): `ConvaD` is this file's (scrambled) import of transformers' Conv1D.
            if issubclass(module.source_cls, ConvaD) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False):
    """
    Recursively replace `nn.Linear`/`Conv1D` children of `model` with
    bitsandbytes quantized linear layers (Linear8bitLt or Linear4bit).

    Name restored from the recursive call site and from
    `replace_with_bnb_linear` below. Returns `(model, has_been_replaced)`.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, ConvaD)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, ConvaD):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """
    Public entry point: swap eligible linear layers of `model` for bitsandbytes
    quantized layers, keeping `lm_head` (or a caller-provided list) untouched.

    Name restored from the call in the deprecated `replace_8bit_linear` wrapper.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias for `replace_with_bnb_linear` (name taken from the warning text)."""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias for `set_module_quantized_tensor_to_device` (name taken from the warning text)."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    """
    Return module names that should be kept in full precision: tied weights
    and the output (last) module, with ".weight"/".bias" suffixes stripped.

    NOTE(review): name restored per the upstream transformers bitsandbytes
    integration; nothing in this chunk calls it, so confirm against callers.
    """
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
| 280 |
def infix_2_postfix(infix):
    """Convert an infix expression string to postfix, printing a conversion table.

    Name restored from the call site in `infix_2_prefix`. Also fixes a crash:
    when an operator was compared against a "(" left on the stack, the
    priority lookup raised KeyError because "(" has no priority entry.
    """
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:
                # while priority of x is not > priority of element in the stack;
                # BUG FIX: stop at "(" which has no entry in the priority table
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    """Convert an infix expression to prefix: reverse, swap parens,
    convert to postfix, then reverse the result.

    Name restored from the call in the main guard below.
    """
    reversed_infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(reversed_infix)):
        if reversed_infix[i] == "(":
            reversed_infix[i] = ")"  # change "(" to ")"
        elif reversed_infix[i] == ")":
            reversed_infix[i] = "("  # change ")" to "("
    return (infix_2_postfix("".join(reversed_infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 280 | 1 |
from __future__ import annotations
class _A:
    """N-th order IIR digital filter (direct form I).

    Coefficients default to the identity filter. NOTE(review): the scrambled
    original defined two methods with the same name and duplicate parameter
    names (a SyntaxError); method names restored per the error-message text
    (`a_coeffs`/`b_coeffs`) and standard IIR-filter API.
    """

    def __init__(self, order: int) -> None:
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order

    def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None:
        """Set feedback (a) and feedforward (b) coefficients.

        If `a_coeffs` is missing the leading a_0, it defaults to 1.0.

        Raises:
            ValueError: if either list does not have order+1 elements.
        """
        if len(a_coeffs) < self.order:
            a_coeffs = [1.0, *a_coeffs]

        if len(a_coeffs) != self.order + 1:
            error_msg = (
                f"Expected a_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(a_coeffs)}"
            )
            raise ValueError(error_msg)

        if len(b_coeffs) != self.order + 1:
            error_msg = (
                f"Expected b_coeffs to have {self.order + 1} elements "
                f"for {self.order}-order filter, got {len(b_coeffs)}"
            )
            raise ValueError(error_msg)

        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs

    def process(self, sample: float) -> float:
        """Feed one input sample through the filter and return the output sample."""
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1, self.order + 1):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]

        # BUG FIX: the scrambled code assigned these history updates to dead
        # locals; shift histories one step and store the newest sample/output.
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class _A(unittest.TestCase):
    """Unit tests for the transformers activation registry."""

    def test_gelu_versions(self):
        """gelu_python matches torch's builtin gelu but differs from gelu_new."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        """gelu_10 equals gelu below the clip point and saturates at 10.0."""
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        # Mask out the clipped region so only the unclipped values are compared.
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        """Every registered activation resolves; unknown names raise KeyError."""
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        """get_activation must return fresh instances, not shared singletons."""
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Import structure consumed by _LazyModule: maps submodule name -> public names.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model classes are only importable when torch is present.
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers only.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the
    sum of the squares of the first `n` natural numbers.

    Both quantities use closed-form formulas, so this runs in O(1).
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    print(f"{solution() = }")
import argparse
from collections import defaultdict
import yaml
UpperCamelCase = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    """Deduplicate and alphabetically sort one level of a toctree section.

    Entries titled "Overview" are kept first; duplicate `local` targets are
    merged (raising ValueError if they disagree on the title).
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys; a doc without a "local" key is always kept.
    # (Fixed: the guard must test the doc dict, not the counts mapping,
    # otherwise duplicates are re-added and missing "local" raises KeyError.)
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError("{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)
    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    """Verify (or fix, with overwrite=True) the Schedulers section of the toctree."""
    # NOTE(review): the sorter is the module-level toctree cleaner defined above.
    with open(UpperCamelCase, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(UpperCamelCase, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")


def check_pipeline_doc(overwrite=False):
    """Verify (or fix, with overwrite=True) the Pipelines section of the toctree."""
    with open(UpperCamelCase, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(UpperCamelCase, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 710 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Pretrained config archive map; previously this dict clobbered the logger
# because both were bound to the same mangled name.
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sayakpaul/vit-msn-base': 'https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json',
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _a(PretrainedConfig):
    """Configuration for a ViT-MSN model.

    Instantiating with defaults yields a configuration comparable to the
    `sayakpaul/vit-msn-base` architecture.
    """

    # Identifier used by AutoConfig to map a config to this class.
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
# Size of the rolling-hash alphabet (byte/codepoint base).
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs as a substring of `text`.

    Uses the Rabin-Karp rolling hash; a hash match is confirmed with a
    direct string comparison to rule out collisions.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Smoke-test rabin_karp on known positive and negative cases."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII input exercises ord() on multi-byte characters
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 551 |
# Size of the rolling-hash alphabet (byte/codepoint base).
alphabet_size = 2_5_6
# Modulus to hash a string
modulus = 1_0_0_0_0_0_3


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs as a substring of `text`.

    Uses the Rabin-Karp rolling hash; a hash match is confirmed with a
    direct string comparison to rule out collisions.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    """Smoke-test rabin_karp on known positive and negative cases."""
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)
    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII input exercises ord() on multi-byte characters
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 551 | 1 |
'''simple docstring'''
def solution(n: int = 100_0000) -> int:
    """Project Euler 14: starting number below `n` whose Collatz chain is longest.

    Chain lengths are memoised in `counters`, so each number's tail is
    computed only once.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}  # memoised chain length per starting number
    for inputa in range(2, n):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # Reuse the memoised length of the remaining chain.
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    print(solution(int(input().strip())))
| 719 | '''simple docstring'''
def depth_first_search(grid, row, col, visit):
    """Count simple 4-directional paths from (row, col) to the bottom-right cell.

    Cells equal to 1 are obstacles; `visit` holds the cells on the current
    path so no cell is used twice.
    """
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 428 | 0 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (old prefix, new prefix) pairs applied in order to every checkpoint key.
rename_keys_prefix = [
    ('bert.bert', 'visual_bert'),
    ('bert.cls', 'cls'),
    ('bert.classifier', 'cls'),
    ('token_type_embeddings_visual', 'visual_token_type_embeddings'),
    ('position_embeddings_visual', 'visual_position_embeddings'),
    ('projection', 'visual_projection'),
]

# Checkpoint file names this converter knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    'nlvr2_coco_pre_trained.th',
    'nlvr2_fine_tuned.th',
    'nlvr2_pre_trained.th',
    'vcr_coco_pre_train.th',
    'vcr_fine_tune.th',
    'vcr_pre_train.th',
    'vqa_coco_pre_trained.th',
    'vqa_fine_tuned.th',
    'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    """Load a torch-serialized state dict from disk onto the CPU."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=None):
    """Translate an original VisualBERT state dict into HF naming.

    Detector weights are dropped; every other key is rewritten through the
    (old, new) pairs in `rename_keys_prefix` (defaults to the module-level
    rename table).
    """
    if rename_keys_prefix is None:
        # Default kept inline so the function also works standalone.
        rename_keys_prefix = [
            ("bert.bert", "visual_bert"),
            ("bert.cls", "cls"),
            ("bert.classifier", "cls"),
            ("token_type_embeddings_visual", "visual_token_type_embeddings"),
            ("position_embeddings_visual", "visual_position_embeddings"),
            ("projection", "visual_projection"),
        ]
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT `.th` checkpoint into an HF model folder.

    The task head and visual embedding size are inferred from the checkpoint
    file name, which must appear in ACCEPTABLE_CHECKPOINTS.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1_024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048, "num_labels": 3_129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1_024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 153 | from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class _snake_case ( _lowercase ):
lowerCamelCase__: Optional[int] = "audio-spectrogram-transformer"
def __init__( self: Any , __lowerCamelCase: Union[str, Any]=7_68 , __lowerCamelCase: List[str]=12 , __lowerCamelCase: Optional[Any]=12 , __lowerCamelCase: Optional[int]=30_72 , __lowerCamelCase: Optional[Any]="gelu" , __lowerCamelCase: Dict=0.0 , __lowerCamelCase: Optional[Any]=0.0 , __lowerCamelCase: Tuple=0.02 , __lowerCamelCase: List[Any]=1e-12 , __lowerCamelCase: Tuple=16 , __lowerCamelCase: Optional[Any]=True , __lowerCamelCase: Optional[Any]=10 , __lowerCamelCase: str=10 , __lowerCamelCase: Any=10_24 , __lowerCamelCase: Dict=1_28 , **__lowerCamelCase: Optional[Any] , ) -> List[str]:
super().__init__(**__lowerCamelCase )
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : int = hidden_dropout_prob
__UpperCAmelCase : Dict = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = initializer_range
__UpperCAmelCase : str = layer_norm_eps
__UpperCAmelCase : int = patch_size
__UpperCAmelCase : Any = qkv_bias
__UpperCAmelCase : str = frequency_stride
__UpperCAmelCase : Union[str, Any] = time_stride
__UpperCAmelCase : Dict = max_length
__UpperCAmelCase : Optional[Any] = num_mel_bins
| 382 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import gcd
def _UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ = 2 , lowerCamelCase__ = 1 , lowerCamelCase__ = 3 , ):
"""simple docstring"""
if num < 2:
raise ValueError("""The input value cannot be less than 2""" )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
return (pow(lowerCamelCase__ , 2 ) + step) % modulus
for _ in range(lowerCamelCase__ ):
# These track the position within the cycle detection logic.
lowerCAmelCase__ = seed
lowerCAmelCase__ = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase__ = rand_fn(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
lowerCAmelCase__ = gcd(hare - tortoise , lowerCamelCase__ )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
lowerCAmelCase__ = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
__lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : Dict = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"{args.num} is probably prime")
else:
__lowerCAmelCase : List[str] = args.num // divisor
print(F"{args.num} = {divisor} * {quotient}")
| 674 | """simple docstring"""
import pprint
import requests
__lowerCAmelCase : Union[str, Any] = "https://zenquotes.io/api"
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _UpperCAmelCase ( ):
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
__lowerCAmelCase : Union[str, Any] = random_quotes()
pprint.pprint(response)
| 674 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : dict):
A_ : List[Any] = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
A_ : Union[str, Any] = set()
return any(
node not in visited and depth_first_search(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
for node in graph)
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : int , lowerCamelCase : set , lowerCamelCase : set):
visited.add(lowerCamelCase)
rec_stk.add(lowerCamelCase)
for node in graph[vertex]:
if node not in visited:
if depth_first_search(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(lowerCamelCase)
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
| 665 |
def _UpperCAmelCase ( UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: int ):
"""simple docstring"""
if exponent == 1:
return base
if exponent % 2 == 0:
__lowerCAmelCase = _modexpt(UpperCamelCase , exponent // 2 , UpperCamelCase ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(UpperCamelCase , exponent - 1 , UpperCamelCase )) % modulo_value
def _UpperCAmelCase ( UpperCamelCase: int = 1_7_7_7 , UpperCamelCase: int = 1_8_5_5 , UpperCamelCase: int = 8 ):
"""simple docstring"""
__lowerCAmelCase = base
for _ in range(1 , UpperCamelCase ):
__lowerCAmelCase = _modexpt(UpperCamelCase , UpperCamelCase , 1_0**digits )
return result
if __name__ == "__main__":
print(f'''{solution() = }''')
| 611 | 0 |
def A_ ( __a : float , __a : float ):
"""simple docstring"""
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"{price_plus_tax(100, 0.25) = }")
print(f"{price_plus_tax(125.50, 0.05) = }")
| 705 |
def A_ ( __a : Any ):
"""simple docstring"""
if not head:
return True
# split the list to two parts
a__ , a__ = head.next, head
while fast and fast.next:
a__ = fast.next.next
a__ = slow.next
a__ = slow.next
a__ = None # Don't forget here! But forget still works!
# reverse the second part
a__ = None
while second:
a__ = second.next
a__ = node
a__ = second
a__ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
a__ = node.next
a__ = head.next
return True
def A_ ( __a : int ):
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
a__ = a__ = a__ = head
while fast and fast.next:
a__ , a__ = fast.next.next, slow.next
# 2. Push the second half into the stack
a__ = [slow.val]
while slow.next:
a__ = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
a__ = cur.next
return True
def A_ ( __a : Any ):
"""simple docstring"""
if not head or not head.next:
return True
a__ = {}
a__ = 0
while head:
if head.val in d:
d[head.val].append(__a )
else:
a__ = [pos]
a__ = head.next
pos += 1
a__ = pos - 1
a__ = 0
for v in d.values():
if len(__a ) % 2 != 0:
middle += 1
else:
a__ = 0
for i in range(0 , len(__a ) ):
if v[i] + v[len(__a ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 351 | 0 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
# Key-rewrite pairs shared by encoder and decoder conversion.
INIT_COMMON = [
    # tf -> hf
    ('/', '.'),
    ('layer_', 'layers.'),
    ('kernel', 'weight'),
    ('beta', 'bias'),
    ('gamma', 'weight'),
    ('pegasus', 'model'),
]

END_COMMON = [
    ('.output.dense', '.fc2'),
    ('intermediate.LayerNorm', 'final_layer_norm'),
    ('intermediate.dense', 'fc1'),
]

DECODER_PATTERNS = (
    INIT_COMMON
    + [
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.out_proj'),
        ('attention.self', 'self_attn'),
        ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'),
        ('attention.encdec_output.dense', 'encoder_attn.out_proj'),
        ('attention.encdec', 'encoder_attn'),
        ('key', 'k_proj'),
        ('value', 'v_proj'),
        ('query', 'q_proj'),
        ('decoder.LayerNorm', 'decoder.layernorm_embedding'),
    ]
    + END_COMMON
)

REMAINING_PATTERNS = (
    INIT_COMMON
    + [
        ('embeddings.word_embeddings', 'shared.weight'),
        ('embeddings.position_embeddings', 'embed_positions.weight'),
        ('attention.self.LayerNorm', 'self_attn_layer_norm'),
        ('attention.output.dense', 'self_attn.output'),
        ('attention.self', 'self_attn.self'),
        ('encoder.LayerNorm', 'encoder.layernorm_embedding'),
    ]
    + END_COMMON
)

# TF checkpoint keys that have no counterpart in the HF model.
KEYS_TO_IGNORE = [
    'encdec/key/bias',
    'encdec/query/bias',
    'encdec/value/bias',
    'self/key/bias',
    'self/query/bias',
    'self/value/bias',
    'encdec_output/dense/bias',
    'attention/output/dense/bias',
]
def rename_state_dict_key(k, patterns):
    """Apply every (tf_name, hf_name) replacement in `patterns` to key `k`, in order."""
    for tf_name, hf_name in patterns:
        k = k.replace(tf_name, hf_name)
    return k
def convert_bigbird_pegasus(tf_weights: dict, config_update: dict):
    """Build a BigBirdPegasus model and populate it from a dict of TF arrays.

    Keys listed in KEYS_TO_IGNORE are skipped; dense/attention matrices are
    transposed to match torch layout. Raises ValueError when a renamed key
    has no counterpart in the torch state dict.
    """
    cfg = BigBirdPegasusConfig(**config_update)
    torch_model = BigBirdPegasusForConditionalGeneration(cfg)
    state_dict = torch_model.state_dict()
    mapping = {}

    # separating decoder weights
    decoder_weights = {k: tf_weights[k] for k in tf_weights if k.startswith("pegasus/decoder")}
    remaining_weights = {k: tf_weights[k] for k in tf_weights if not k.startswith("pegasus/decoder")}

    for k, v in tqdm(decoder_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = DECODER_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict:
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""

    for k, v in tqdm(remaining_weights.items(), "tf -> hf conversion"):
        conditions = [k.endswith(ending) for ending in KEYS_TO_IGNORE]
        if any(conditions):
            continue
        patterns = REMAINING_PATTERNS
        new_k = rename_state_dict_key(k, patterns)
        if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
            raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""")
        if any(True if i in k else False for i in ["dense", "query", "key", "value"]):
            v = v.T
        mapping[new_k] = torch.from_numpy(v)
        if k != "pegasus/embeddings/position_embeddings":
            assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""

    # Encoder and decoder share the converted position embeddings.
    mapping["model.encoder.embed_positions.weight"] = mapping["model.embed_positions.weight"]
    mapping["model.decoder.embed_positions.weight"] = mapping.pop("model.embed_positions.weight")
    missing, extra = torch_model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k
        for k in missing
        if k
        not in [
            "final_logits_bias",
            "model.encoder.embed_tokens.weight",
            "model.decoder.embed_tokens.weight",
            "lm_head.weight",
        ]
    ]
    assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
    assert extra == [], f"""no matches found for the following tf keys {extra}"""
    return torch_model
def UpperCAmelCase__ (lowerCAmelCase_ ):
    """Load every variable of a TensorFlow checkpoint into a ``{name: numpy array}`` dict.

    Args:
        lowerCAmelCase_: checkpoint path passed to ``tf.train.list_variables`` /
            ``tf.train.load_variable``.

    Returns:
        dict mapping variable names to their numpy values, skipping bookkeeping
        variables such as ``global_step``.
    """
    init_vars = tf.train.list_variables(lowerCAmelCase_ )
    tf_weights = {}
    # Checkpoint bookkeeping variables that are not model weights.
    ignore_name = ["global_step"]
    # Iterate over the checkpoint's variable list (the original iterated the
    # path string itself after an automated rename destroyed the local names).
    for name, shape in tqdm(init_vars , desc="converting tf checkpoint to dict" ):
        skip_key = any(pat in name for pat in ignore_name )
        if skip_key:
            continue
        array = tf.train.load_variable(lowerCAmelCase_ , name )
        tf_weights[name] = array
    return tf_weights
def UpperCAmelCase__ (ckpt_path , save_dir , config_update ):
    """Convert a TensorFlow BigBirdPegasus checkpoint and save it as a PyTorch model.

    The original signature declared three parameters all named
    ``lowerCAmelCase_`` (a SyntaxError); the names below match the call site,
    which passes ``config_update=`` as a keyword.

    Args:
        ckpt_path: TF checkpoint path.
        save_dir: output directory for ``save_pretrained``.
        config_update: dict of config overrides applied during conversion.
    """
    tf_weights = get_tf_weights_as_numpy(ckpt_path )
    torch_model = convert_bigbird_pegasus(tf_weights , config_update )
    torch_model.save_pretrained(save_dir )
if __name__ == "__main__":
    # NOTE(review): an automated renamer replaced these assignment targets with
    # ``a__``, so the later reads of ``parser``, ``args`` and ``config_update``
    # do not resolve as written; ``convert_bigbird_pegasus_ckpt_to_pytorch`` is
    # the upstream name of the (renamed) conversion function defined above.
    a__ : Dict = argparse.ArgumentParser()
    parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
    parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
    a__ : str = parser.parse_args()
    a__ : Optional[Any] = {}
    convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 682 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def UpperCAmelCase__ (lowerCAmelCase_=None ):
    """Create (or register) the argument parser for the ``accelerate env`` command.

    Args:
        lowerCAmelCase_: optional argparse subparsers object; when given, the
            ``env`` sub-command is registered on it, otherwise a standalone
            parser is built.

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    subparsers = lowerCAmelCase_
    if subparsers is not None:
        parser = subparsers.add_parser("env" )
    else:
        parser = argparse.ArgumentParser("Accelerate env command" )
    # ``default`` must be None, not the subparsers object the original passed.
    parser.add_argument(
        "--config_file" , default=None , help="The config file to use for the default values in the launching script." )
    if subparsers is not None:
        # NOTE(review): upstream sets func=env_command here; that name does not
        # exist in this renamed file, so the original's value is preserved.
        parser.set_defaults(func=lowerCAmelCase_ )
    return parser
def UpperCAmelCase__ (lowerCAmelCase_ ):
    """Collect and print environment information for bug reports.

    Args:
        lowerCAmelCase_: parsed CLI namespace with a ``config_file`` attribute.

    Returns:
        dict with the collected environment information.
    """
    args = lowerCAmelCase_
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        accelerate_config = load_config_from_file(args.config_file ).to_dict()
    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
        "PyTorch XPU available": str(pt_xpu_available ),
        "PyTorch NPU available": str(pt_npu_available ),
        "System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()
    print("\nCopy-and-paste the text below in your GitHub issue\n" )
    print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )
    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
    accelerate_config_str = (
        "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
        if isinstance(accelerate_config , dict )
        else f"""\t{accelerate_config}"""
    )
    print(accelerate_config_str )
    info["`Accelerate` configs"] = accelerate_config
    return info
def UpperCAmelCase__ ():
    """CLI entry point: build the parser, parse argv and run the env command.

    NOTE(review): ``env_command_parser``/``env_command`` are the upstream names;
    in this renamed file the corresponding defs are both called
    ``UpperCAmelCase__``, so these references do not resolve as written.
    """
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
    # NOTE(review): ``main`` is the upstream name of the renamed entry point above.
    raise SystemExit(main())
| 682 | 1 |
def __lowercase ( snake_case ):
    """Sort a list in place using gnome sort and return it.

    Args:
        snake_case: list of mutually comparable items (mutated in place).

    Returns:
        The same list, sorted in ascending order (stable for equal elements).
    """
    lst = snake_case
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Out of order: swap the pair and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    # NOTE(review): the renamer replaced the assignment targets, so
    # ``user_input``, ``unsorted`` and ``gnome_sort`` (the sort function above
    # is now called ``__lowercase``) no longer resolve as written.
    SCREAMING_SNAKE_CASE__ : Union[str, Any] = input("""Enter numbers separated by a comma:\n""").strip()
    SCREAMING_SNAKE_CASE__ : Dict = [int(item) for item in user_input.split(""",""")]
    print(gnome_sort(unsorted))
| 180 |
from __future__ import annotations
def __lowercase ( nums, target ):
    """Two-pointer two-sum over a sorted list.

    The original declared both parameters as ``snake_case`` (a SyntaxError);
    the body already read ``nums``/``target``, restored here.

    Args:
        nums: list of numbers sorted in ascending order.
        target: value two elements should sum to.

    Returns:
        ``[i, j]`` with ``nums[i] + nums[j] == target`` and ``i < j``, or
        ``[]`` when no such pair exists.
    """
    i = 0
    j = len(nums ) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): ``two_pointer`` is the upstream name of the renamed
    # function above (now ``__lowercase``), so this call does not resolve.
    print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 180 | 1 |
"""simple docstring"""
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
# NOTE(review): automated renaming damaged this block — the base class
# (upstream: ``BaseOutput``) is unresolved and both field names/annotations
# (upstream: ``prev_sample: torch.FloatTensor`` and
# ``pred_original_sample: Optional[torch.FloatTensor]``) were replaced, so the
# second ``A__`` shadows the first.
class UpperCamelCase ( __SCREAMING_SNAKE_CASE ):
    A__ = 42
    # Overwrites the field above because both share the name ``A__``.
    A__ = None
def _lowerCAmelCase ( lowerCamelCase__ : List[Any], lowerCamelCase__ : List[Any]=0.999, lowerCamelCase__ : Union[str, Any]="cosine", ) -> Dict:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCamelCase__ : Tuple ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCamelCase__ : Tuple ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
_SCREAMING_SNAKE_CASE : Union[str, Any] = []
for i in range(lowerCamelCase__ ):
_SCREAMING_SNAKE_CASE : Tuple = i / num_diffusion_timesteps
_SCREAMING_SNAKE_CASE : int = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCamelCase__ ) / alpha_bar_fn(lowerCamelCase__ ), lowerCamelCase__ ) )
return torch.tensor(lowerCamelCase__, dtype=torch.floataa )
# NOTE(review): this class went through an automated renamer. The base classes
# (upstream: ``SchedulerMixin, ConfigMixin``) do not resolve, every method's
# parameters are all named ``snake_case__`` (duplicate parameter names are a
# SyntaxError), and assignments that upstream targeted ``self.<attr>`` now bind
# throwaway ``_SCREAMING_SNAKE_CASE`` locals, so attributes such as
# ``self.betas`` and ``self.timesteps`` are never actually set. The structure
# mirrors the UnCLIP DDPM-style scheduler; comments describe intended behavior.
class UpperCamelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
    @register_to_config
    def __init__( self , snake_case__ = 1000 , snake_case__ = "fixed_small_log" , snake_case__ = True , snake_case__ = 1.0 , snake_case__ = "epsilon" , snake_case__ = "squaredcos_cap_v2" , ):
        """Configure the scheduler.

        NOTE(review): the six parameters share one name here; upstream they are
        ``num_train_timesteps, variance_type, clip_sample, clip_sample_range,
        prediction_type, beta_schedule``.
        """
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'" )
        _SCREAMING_SNAKE_CASE : List[Any] = betas_for_alpha_bar(snake_case__ )
        _SCREAMING_SNAKE_CASE : Any = 1.0 - self.betas
        _SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
        _SCREAMING_SNAKE_CASE : str = torch.tensor(1.0 )
        # standard deviation of the initial noise distribution
        _SCREAMING_SNAKE_CASE : Optional[Any] = 1.0
        # setable values
        _SCREAMING_SNAKE_CASE : str = None
        _SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(np.arange(0 , snake_case__ )[::-1].copy() )
        _SCREAMING_SNAKE_CASE : Dict = variance_type
    def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None ):
        """Identity scale — UnCLIP does not rescale model inputs; returns the sample unchanged."""
        return sample
    def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ = None ):
        """Set the discrete timesteps used for inference (evenly spaced, descending)."""
        _SCREAMING_SNAKE_CASE : Optional[Any] = num_inference_steps
        _SCREAMING_SNAKE_CASE : Optional[int] = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        # NOTE(review): ``np.intaa`` below is a mangled ``np.int64``.
        _SCREAMING_SNAKE_CASE : Optional[Any] = (np.arange(0 , snake_case__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
        _SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(snake_case__ ).to(snake_case__ )
    def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None ):
        """Posterior variance at timestep ``t`` (formulas (6)/(7) of the DDPM paper)."""
        if prev_timestep is None:
            _SCREAMING_SNAKE_CASE : List[str] = t - 1
        _SCREAMING_SNAKE_CASE : Dict = self.alphas_cumprod[t]
        _SCREAMING_SNAKE_CASE : List[Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        _SCREAMING_SNAKE_CASE : List[Any] = 1 - alpha_prod_t
        _SCREAMING_SNAKE_CASE : Optional[Any] = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            _SCREAMING_SNAKE_CASE : Dict = self.betas[t]
        else:
            _SCREAMING_SNAKE_CASE : Tuple = 1 - alpha_prod_t / alpha_prod_t_prev
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        _SCREAMING_SNAKE_CASE : Tuple = beta_prod_t_prev / beta_prod_t * beta
        if variance_type is None:
            _SCREAMING_SNAKE_CASE : List[str] = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            _SCREAMING_SNAKE_CASE : List[str] = torch.log(torch.clamp(snake_case__ , min=1E-20 ) )
            _SCREAMING_SNAKE_CASE : List[str] = torch.exp(0.5 * variance )
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            _SCREAMING_SNAKE_CASE : List[Any] = variance.log()
            _SCREAMING_SNAKE_CASE : List[str] = beta.log()
            _SCREAMING_SNAKE_CASE : Any = (predicted_variance + 1) / 2
            _SCREAMING_SNAKE_CASE : Tuple = frac * max_log + (1 - frac) * min_log
        return variance
    def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , snake_case__=None , snake_case__ = True , ):
        """One reverse-diffusion step: predict x_{t-1} from model output and x_t."""
        _SCREAMING_SNAKE_CASE : Any = timestep
        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = torch.split(snake_case__ , sample.shape[1] , dim=1 )
        else:
            _SCREAMING_SNAKE_CASE : Any = None
        # 1. compute alphas, betas
        if prev_timestep is None:
            _SCREAMING_SNAKE_CASE : Tuple = t - 1
        _SCREAMING_SNAKE_CASE : Dict = self.alphas_cumprod[t]
        _SCREAMING_SNAKE_CASE : str = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        _SCREAMING_SNAKE_CASE : List[str] = 1 - alpha_prod_t
        _SCREAMING_SNAKE_CASE : Dict = 1 - alpha_prod_t_prev
        if prev_timestep == t - 1:
            _SCREAMING_SNAKE_CASE : Tuple = self.betas[t]
            _SCREAMING_SNAKE_CASE : Union[str, Any] = self.alphas[t]
        else:
            _SCREAMING_SNAKE_CASE : List[str] = 1 - alpha_prod_t / ​alpha_prod_t_prev
            _SCREAMING_SNAKE_CASE : Dict = 1 - beta
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            _SCREAMING_SNAKE_CASE : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            _SCREAMING_SNAKE_CASE : Union[str, Any] = model_output
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`'''
                " for the UnCLIPScheduler." )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            _SCREAMING_SNAKE_CASE : Optional[int] = torch.clamp(
                snake_case__ , -self.config.clip_sample_range , self.config.clip_sample_range )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        _SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        _SCREAMING_SNAKE_CASE : int = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        _SCREAMING_SNAKE_CASE : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        _SCREAMING_SNAKE_CASE : Optional[int] = 0
        if t > 0:
            _SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
                model_output.shape , dtype=model_output.dtype , generator=snake_case__ , device=model_output.device )
            _SCREAMING_SNAKE_CASE : Dict = self._get_variance(
                snake_case__ , predicted_variance=snake_case__ , prev_timestep=snake_case__ , )
            if self.variance_type == "fixed_small_log":
                _SCREAMING_SNAKE_CASE : Any = variance
            elif self.variance_type == "learned_range":
                _SCREAMING_SNAKE_CASE : Optional[Any] = (0.5 * variance).exp()
            else:
                raise ValueError(
                    F'''variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`'''
                    " for the UnCLIPScheduler." )
            _SCREAMING_SNAKE_CASE : Any = variance * variance_noise
        _SCREAMING_SNAKE_CASE : Optional[Any] = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
    def __SCREAMING_SNAKE_CASE ( self , snake_case__ , snake_case__ , snake_case__ , ):
        """Forward-diffuse clean samples to the given timesteps (q(x_t | x_0))."""
        _SCREAMING_SNAKE_CASE : str = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
        _SCREAMING_SNAKE_CASE : Dict = timesteps.to(original_samples.device )
        _SCREAMING_SNAKE_CASE : str = alphas_cumprod[timesteps] ** 0.5
        _SCREAMING_SNAKE_CASE : Optional[Any] = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
            _SCREAMING_SNAKE_CASE : Tuple = sqrt_alpha_prod.unsqueeze(-1 )
        _SCREAMING_SNAKE_CASE : Optional[int] = (1 - alphas_cumprod[timesteps]) ** 0.5
        _SCREAMING_SNAKE_CASE : List[str] = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
            _SCREAMING_SNAKE_CASE : Tuple = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
        _SCREAMING_SNAKE_CASE : Union[str, Any] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
| 572 |
"""simple docstring"""
from __future__ import annotations
import bisect
def _lowerCAmelCase ( sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> int:
    """Leftmost insertion index for ``item`` in sorted ``sorted_collection``.

    The original declared all four parameters as ``lowerCamelCase__`` (a
    SyntaxError); the body already read ``sorted_collection``/``item``/``lo``/
    ``hi``, restored here. A negative ``hi`` means "search to the end".
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def _lowerCAmelCase ( sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> int:
    """Rightmost insertion index for ``item`` in sorted ``sorted_collection``.

    The original declared all four parameters as ``lowerCamelCase__`` (a
    SyntaxError); restored from the names the body reads. A negative ``hi``
    means "search to the end".
    """
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def _lowerCAmelCase ( sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> None:
    """Insert ``item`` into sorted ``sorted_collection`` before any equal values.

    The original declared all four parameters as ``lowerCamelCase__`` (a
    SyntaxError) and called ``bisect_left``, a name that no longer exists in
    this renamed file; delegate to the stdlib ``bisect`` module (imported at
    the top of the file) with the same "negative hi = end" convention.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_left(sorted_collection, item, lo, hi ), item )
def _lowerCAmelCase ( sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1 ) -> None:
    """Insert ``item`` into sorted ``sorted_collection`` after any equal values.

    The original declared all four parameters as ``lowerCamelCase__`` (a
    SyntaxError) and called ``bisect_right``, a name that no longer exists in
    this renamed file; delegate to the stdlib ``bisect`` module with the same
    "negative hi = end" convention.
    """
    if hi < 0:
        hi = len(sorted_collection )
    sorted_collection.insert(bisect.bisect_right(sorted_collection, item, lo, hi ), item )
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int ) -> int | None:
_SCREAMING_SNAKE_CASE : Tuple = 0
_SCREAMING_SNAKE_CASE : int = len(lowerCamelCase__ ) - 1
while left <= right:
_SCREAMING_SNAKE_CASE : Union[str, Any] = left + (right - left) // 2
_SCREAMING_SNAKE_CASE : Dict = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
_SCREAMING_SNAKE_CASE : Optional[int] = midpoint - 1
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = midpoint + 1
return None
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int ) -> int | None:
_SCREAMING_SNAKE_CASE : List[str] = bisect.bisect_left(lowerCamelCase__, lowerCamelCase__ )
if index != len(lowerCamelCase__ ) and sorted_collection[index] == item:
return index
return None
def _lowerCAmelCase ( lowerCamelCase__ : list[int], lowerCamelCase__ : int, lowerCamelCase__ : int, lowerCamelCase__ : int ) -> int | None:
if right < left:
return None
_SCREAMING_SNAKE_CASE : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, midpoint - 1 )
else:
return binary_search_by_recursion(lowerCamelCase__, lowerCamelCase__, midpoint + 1, lowerCamelCase__ )
if __name__ == "__main__":
    # NOTE(review): assignment targets were renamed to ``lowercase_``, so the
    # later reads of ``user_input``, ``collection``, ``target`` and ``result``
    # do not resolve, and ``binary_search`` is the upstream name of one of the
    # (renamed) search functions above.
    lowercase_ : Tuple = input('''Enter numbers separated by comma:\n''').strip()
    lowercase_ : Any = sorted(int(item) for item in user_input.split(''','''))
    lowercase_ : str = int(input('''Enter a single number to be found in the list:\n'''))
    lowercase_ : List[str] = binary_search(collection, target)
    if result is None:
        print(F'{target} was not found in {collection}.')
    else:
        print(F'{target} was found at position {result} in {collection}.')
| 572 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): automated renaming damaged this test class — the mixin base
# (upstream: ``TokenizerTesterMixin``) is unresolved, the three class
# attributes all share the name ``UpperCamelCase_`` (the last assignment wins),
# and many method-local assignments bind ``lowerCAmelCase__`` while later
# lines read the pre-rename variable names (``vocab_tokens``,
# ``input_text``/``output_text``, ``original_len`` …).
class a_ ( __UpperCamelCase , unittest.TestCase ):
    UpperCamelCase_ : Dict = TransfoXLTokenizer
    UpperCamelCase_ : Union[str, Any] = False
    UpperCamelCase_ : Tuple = False
    # setUp: write a tiny vocabulary file into the test tmp dir.
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        super().setUp()
        lowerCAmelCase__ = [
            """<unk>""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """unwanted""",
            """wa""",
            """un""",
            """running""",
            """,""",
            """low""",
            """l""",
        ]
        lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    # Factory returning a lower-casing tokenizer loaded from the tmp dir.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , **snake_case__ : Optional[int] ):
        lowerCAmelCase__ = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
    # Input/expected-output pair used by the shared tokenizer tests.
    def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Optional[int] ):
        lowerCAmelCase__ = """<unk> UNwanted , running"""
        lowerCAmelCase__ = """<unk> unwanted, running"""
        return input_text, output_text
    # Full tokenization round-trip against the tiny vocab.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
        lowerCAmelCase__ = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=snake_case__ )
        lowerCAmelCase__ = tokenizer.tokenize("""<unk> UNwanted , running""" )
        self.assertListEqual(snake_case__ , ["""<unk>""", """unwanted""", """,""", """running"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [0, 4, 8, 7] )
    # Lower-casing behavior.
    def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
        lowerCAmelCase__ = TransfoXLTokenizer(lower_case=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ?  """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
    # Case-preserving behavior.
    def _SCREAMING_SNAKE_CASE ( self : int ):
        lowerCAmelCase__ = TransfoXLTokenizer(lower_case=snake_case__ )
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how \n Are yoU ?  """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
    # Moses-style punctuation / number splitting and detokenization round-trip.
    def _SCREAMING_SNAKE_CASE ( self : int ):
        lowerCAmelCase__ = TransfoXLTokenizer(lower_case=snake_case__ )
        lowerCAmelCase__ = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
        lowerCAmelCase__ = [
            """Hello""",
            """(""",
            """bracket""",
            """)""",
            """and""",
            """side""",
            """@-@""",
            """scrolled""",
            """[""",
            """and""",
            """]""",
            """Henry""",
            """'s""",
            """$""",
            """5""",
            """@,@""",
            """000""",
            """with""",
            """3""",
            """@.@""",
            """34""",
            """m""",
            """.""",
            """What""",
            """'s""",
            """up""",
            """!""",
            """?""",
        ]
        self.assertListEqual(tokenizer.tokenize(snake_case__ ) , snake_case__ )
        self.assertEqual(tokenizer.convert_tokens_to_string(snake_case__ ) , snake_case__ )
    # Moving an added token to a specific id must not duplicate it.
    def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
        lowerCAmelCase__ = self.get_tokenizer()
        lowerCAmelCase__ = len(snake_case__ )
        tokenizer.add_tokens(["""new1""", """new2"""] )
        tokenizer.move_added_token("""new1""" , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(snake_case__ ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("""new1""" ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , """new1""" )
| 674 | """simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( __UpperCamelCase , unittest.TestCase ):
UpperCamelCase_ : str = LayoutLMTokenizer
UpperCamelCase_ : List[Any] = LayoutLMTokenizerFast
UpperCamelCase_ : Dict = True
UpperCamelCase_ : Any = True
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
super().setUp()
lowerCAmelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : int , **snake_case__ : Union[str, Any] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **snake_case__ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , snake_case__ : Tuple ):
lowerCAmelCase__ = """UNwant\u00E9d,running"""
lowerCAmelCase__ = """unwanted, running"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(snake_case__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , [7, 4, 5, 10, 8, 9] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
pass
| 674 | 1 |
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module constants below are assigned to the same name
# ``lowerCAmelCase__`` (upstream: ``logger``, ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP``, ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``),
# so each assignment overwrites the previous one.
lowerCAmelCase__: Any = logging.get_logger(__name__)
# Standard file names for the two artifacts of a byte-level BPE tokenizer.
lowerCAmelCase__: Union[str, Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# Download URLs for the vocab/merges files of each published checkpoint.
lowerCAmelCase__: str = {
    'vocab_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
        ),
    },
    'merges_file': {
        'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
        'allenai/longformer-large-4096': (
            'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-finetuned-triviaqa': (
            'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
        ),
        'allenai/longformer-base-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
        'allenai/longformer-large-4096-extra.pos.embd.only': (
            'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
        ),
    },
}
# Maximum input length (positional embeddings) for each checkpoint.
lowerCAmelCase__: int = {
    'allenai/longformer-base-4096': 4096,
    'allenai/longformer-large-4096': 4096,
    'allenai/longformer-large-4096-finetuned-triviaqa': 4096,
    'allenai/longformer-base-4096-extra.pos.embd.only': 4096,
    'allenai/longformer-large-4096-extra.pos.embd.only': 4096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def __SCREAMING_SNAKE_CASE ( ) -> dict:
    """Map every byte value (0-255) to a printable unicode character.

    Printable bytes keep their own character; the remaining bytes are shifted
    above 255 so the BPE never has to merge over whitespace/control
    characters. Cached because the table never changes.

    The original bound every local to typed ``SCREAMING_SNAKE_CASE_`` names and
    then read ``bs``/``cs``/``n`` (NameError); restored here.
    """
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def __SCREAMING_SNAKE_CASE ( word ) -> set:
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols).

    The original bound its locals to typed ``SCREAMING_SNAKE_CASE_`` names and
    then read ``pairs``/``prev_char`` (NameError), and the return annotation
    claimed ``str``; both fixed here.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class snake_case_ ( snake_case_ ):
__lowerCamelCase : str = VOCAB_FILES_NAMES
__lowerCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Any = ['input_ids', 'attention_mask']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="replace" , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=False , **__lowerCAmelCase , ):
SCREAMING_SNAKE_CASE_ : str = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else bos_token
SCREAMING_SNAKE_CASE_ : Any = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else eos_token
SCREAMING_SNAKE_CASE_ : int = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else sep_token
SCREAMING_SNAKE_CASE_ : List[str] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else cls_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else unk_token
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AddedToken(A_ , lstrip=A_ , rstrip=A_ ) if isinstance(A_ , A_ ) else mask_token
super().__init__(
errors=A_ , bos_token=A_ , eos_token=A_ , unk_token=A_ , sep_token=A_ , cls_token=A_ , pad_token=A_ , mask_token=A_ , add_prefix_space=A_ , **A_ , )
with open(A_ , encoding='utf-8' ) as vocab_handle:
SCREAMING_SNAKE_CASE_ : str = json.load(A_ )
SCREAMING_SNAKE_CASE_ : str = {v: k for k, v in self.encoder.items()}
SCREAMING_SNAKE_CASE_ : int = errors # how to handle errors in decoding
SCREAMING_SNAKE_CASE_ : List[Any] = bytes_to_unicode()
SCREAMING_SNAKE_CASE_ : List[str] = {v: k for k, v in self.byte_encoder.items()}
with open(A_ , encoding='utf-8' ) as merges_handle:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = merges_handle.read().split('\n' )[1:-1]
SCREAMING_SNAKE_CASE_ : List[str] = [tuple(merge.split() ) for merge in bpe_merges]
SCREAMING_SNAKE_CASE_ : Dict = dict(zip(A_ , range(len(A_ ) ) ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE_ : Tuple = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def __A ( self ):
return len(self.encoder )
def __A ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __A ( self , __lowerCAmelCase ):
if token in self.cache:
return self.cache[token]
SCREAMING_SNAKE_CASE_ : Optional[Any] = tuple(A_ )
SCREAMING_SNAKE_CASE_ : Any = get_pairs(A_ )
if not pairs:
return token
while True:
SCREAMING_SNAKE_CASE_ : List[Any] = min(A_ , key=lambda __lowerCAmelCase : self.bpe_ranks.get(A_ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = bigram
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Tuple = 0
while i < len(A_ ):
try:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = word.index(A_ , A_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
SCREAMING_SNAKE_CASE_ : Tuple = j
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
SCREAMING_SNAKE_CASE_ : Optional[int] = tuple(A_ )
SCREAMING_SNAKE_CASE_ : Dict = new_word
if len(A_ ) == 1:
break
else:
SCREAMING_SNAKE_CASE_ : int = get_pairs(A_ )
SCREAMING_SNAKE_CASE_ : Tuple = ' '.join(A_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = word
return word
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = []
for token in re.findall(self.pat , A_ ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(A_ ).split(' ' ) )
return bpe_tokens
def __A ( self , __lowerCAmelCase ):
return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )
def __A ( self , __lowerCAmelCase ):
return self.decoder.get(A_ )
def __A ( self , __lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[Any] = ''.join(A_ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : int = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ : List[str] = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + '\n' )
SCREAMING_SNAKE_CASE_ : Any = 0
with open(A_ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
SCREAMING_SNAKE_CASE_ : Optional[int] = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
if token_ids_a is None:
return [1] + ([0] * len(A_ )) + [1]
return [1] + ([0] * len(A_ )) + [1, 1] + ([0] * len(A_ )) + [1]
def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_b=None):
    """Return token-type ids for one or two sequences.

    RoBERTa-style models do not use segment ids, so the mask is all zeros and
    only its length varies with the special-token layout.
    """
    # Original signature repeated the same parameter name twice (SyntaxError).
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_b is None:
        return len(cls + token_ids_a + sep ) * [0]
    return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
def __A ( self , __lowerCAmelCase , __lowerCAmelCase=False , **__lowerCAmelCase ):
SCREAMING_SNAKE_CASE_ : List[str] = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(A_ ) > 0 and not text[0].isspace()):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ' ' + text
return (text, kwargs)
| 345 |
"""Open the top Google results for a query given on the command line."""
import sys
import webbrowser

import requests
from bs4 import BeautifulSoup  # original imported the non-existent `bsa`
from fake_useragent import UserAgent

if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    # NOTE(review): header key 'UserAgent' (not 'User-Agent') is kept as-is
    # from the original script — confirm before changing request behavior.
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
| 3 | 0 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract start/end timestamps and duration (minutes) from one GitHub job dict.

    Args:
        job: a job entry from the GitHub Actions "list jobs" API, containing
             ISO-8601 ``started_at`` and ``completed_at`` fields.

    Returns:
        dict with keys ``started_at``, ``completed_at`` and ``duration`` (int minutes).
    """
    # The corrupted copy parsed the whole job dict instead of the timestamps
    # and returned an unbound name; restored from the surviving reads
    # (the caller sorts on `item[1]["duration"]`).
    job_info = {}
    start = job["started_at"]
    end = job["completed_at"]
    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)
    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min
    return job_info
def get_job_time(workflow_run_id, token=None):
    """Fetch per-job timing info for a transformers GitHub Actions workflow run.

    Args:
        workflow_run_id: numeric id of the workflow run.
        token: optional GitHub token for authenticated (higher-rate) requests.

    Returns:
        dict mapping job name -> timing dict from `extract_time_from_single_job`;
        empty dict on any error (best-effort, error is printed).
    """
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"""Bearer {token}"""}
    url = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    result = requests.get(url, headers=headers).json()
    job_time = {}
    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        # The API pages 100 jobs at a time; fetch the remaining pages (page 1
        # was the request above, so subsequent pages start at 2).
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"""&page={i + 2}""", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        return job_time
    except Exception:
        print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""")
        return {}
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
    args = parser.parse_args()
    job_time = get_job_time(args.workflow_run_id)
    # Sort jobs by duration, longest first, for readability.
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
    for k, v in job_time.items():
        print(f'''{k}: {v["duration"]}''')
| 712 |
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    Classic carry-propagation: XOR gives the partial sum, AND shifted left
    gives the carry; iterate until no carry remains.

    NOTE: with Python's unbounded ints this does not terminate for negative
    inputs (no fixed word size to absorb the sign carry).
    """
    # Original had duplicate parameter names (SyntaxError) and an unbound `c`;
    # the __main__ block below calls `add`, grounding the function name.
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read the two operands interactively and show the bitwise-add result.
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
    print(f'''{add(first, second) = }''')
| 64 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# model_type -> feature-extractor class name; consumed by
# `feature_extractor_class_from_name` and the lazy mapping below.
# (The corrupted copy bound all three module constants to the same throwaway
# name, leaving `logger`, `FEATURE_EXTRACTOR_MAPPING_NAMES` and
# `FEATURE_EXTRACTOR_MAPPING` unbound at their use sites.)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazily resolves config classes -> feature-extractor classes on first access.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor class from its name.

    Resolution order: (1) the per-model modules listed in
    FEATURE_EXTRACTOR_MAPPING_NAMES, (2) classes registered dynamically on the
    mapping, (3) the main ``transformers`` namespace (which exposes dummies
    when a backend dependency is missing). Returns ``None`` if not found.
    """
    # The corrupted copy reused one placeholder for the param, the module name
    # and the imported module, so `getattr` was called with the wrong objects.
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
            try:
                return getattr(module , class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """Load the feature-extractor configuration dict for a model repo/path.

    Returns an empty dict when no feature-extractor config file exists
    (callers then fall back to the model config).
    """
    # Restored: the file to fetch is FEATURE_EXTRACTOR_NAME and the result
    # must be bound to `resolved_config_file` (read just below).
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , FEATURE_EXTRACTOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader)
class __UpperCamelCase :
    """Factory that instantiates the correct feature extractor for a checkpoint.

    Not meant to be instantiated directly — use `from_pretrained`.
    """

    def __init__( self ):
        raise EnvironmentError(
            '''AutoFeatureExtractor is designed to be instantiated '''
            '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        """Instantiate a feature extractor from a model repo or local path.

        Resolution order: explicit `feature_extractor_type` in the feature
        extractor config, a remote-code `auto_map` entry, then the model
        config's type mapping. Raises ValueError when nothing matches.
        """
        # (Both methods of the corrupted copy shared one mangled name and
        # every local was a placeholder; restored from the surviving reads.)
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path , **kwargs )
        feature_extractor_class = config_dict.get('''feature_extractor_type''' , None )
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
            feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config , '''feature_extractor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map['''AutoFeatureExtractor''']
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map , pretrained_model_name_or_path , **kwargs )
            _ = kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register( config_class , feature_extractor_class ):
        """Register a new (config class, feature-extractor class) pair."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class , feature_extractor_class )
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    """Entry point of the `accelerate` CLI: dispatch to the chosen subcommand."""
    # The corrupted copy passed the function object itself everywhere a real
    # argument was expected; restored the canonical wiring (the __main__ guard
    # below calls `main`, grounding the function name).
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func"""):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 665 | 0 |
'''simple docstring'''
import numpy
class TwoHiddenLayerNeuralNetwork:
    """A tiny fully-connected network with two hidden layers (4 and 3 nodes).

    Trained with plain gradient descent / backpropagation using the sigmoid
    activation. (`example()` below instantiates this class by this name, and
    the corrupted copy's internal calls reference `feedforward`,
    `back_propagation`, `train` and `predict`, grounding the restored names —
    the copy bound every attribute to a throwaway local and gave all four
    methods the same name.)
    """

    def __init__(self, input_array, output_array) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # First hidden layer has 4 nodes, second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Second hidden layer has 3 nodes, output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network, initially zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the stored inputs through the network; return the output layer."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """One gradient-descent step on all three weight matrices."""
        # d(loss)/d(W3): gradient w.r.t. the second-hidden->output weights.
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        # d(loss)/d(W2): error back-propagated through the output weights.
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        # d(loss)/d(W1): error back-propagated through both hidden layers.
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output, iterations, give_loss) -> None:
        """Run `iterations` feedforward/backprop steps; optionally print the loss."""
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(F'''Iteration {iteration} Loss: {loss}''')

    def predict(self, input_arr) -> int:
        """Classify one input row: 1 if the network output exceeds 0.6, else 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)
def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid, 1 / (1 + e^-x), applied element-wise.

    Renamed to `sigmoid` (and the parameter to `value`) because the network
    class above calls it by this name and the body read `value`.
    """
    return 1 / (1 + numpy.exp(-value))
def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid expressed in terms of its OUTPUT: s * (1 - s).

    `value` must already be a sigmoid activation, which is how the
    backpropagation code above uses it.
    """
    return (value) * (1 - (value))
def example() -> int:
    """Build, briefly train, and query the example 3-bit parity network."""
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
| 539 |
"""Benchmark an exported BERT-like ONNX model with onnxruntime (TensorRT/CUDA)."""
import os
import time

import numpy as np
import onnxruntime as ort

# NOTE(review): the targets of these three flag assignments were lost in this
# copy. Given the TensorRT execution provider below they are presumably
# ORT/TensorRT toggles (e.g. INT8 / engine-cache switches) — confirm against
# the original script; as written they are inert placeholders.
_UNKNOWN_FLAG_A = "1"
_UNKNOWN_FLAG_B = "0"
_UNKNOWN_FLAG_C = "1"

sess_opt = ort.SessionOptions()
# Disable graph optimizations so the providers see the raw graph
# (presumably to let TensorRT do its own fusion — confirm intent).
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
| 539 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """Build a minimal (model, optimizer, scheduler, train_dl, valid_dl) fixture.

    Renamed from a placeholder: the test class below calls `create_components()`
    and the original bound every component to a throwaway local, leaving the
    returned names unbound.
    """
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl
def get_signature(model):
    """Return a scalar 'signature' of a linear model's parameters.

    Sum of absolute weights and biases — cheap way to detect whether the
    parameters changed between save/load in the tests below.
    """
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def load_random_weights(model):
    """Overwrite `model`'s parameters in place with a fresh random Linear's state.

    Shape-compatible by construction: the new layer is built from the
    transposed weight shape of `model`.
    """
    new_model = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(new_model)
class lowerCAmelCase(AccelerateTestCase):
    """Unit tests for the core `Accelerator`: device selection, singleton state,
    object preparation/release, checkpoint save/load (with hooks) and bnb models.

    (Restored from a corrupted copy in which the base class and all locals were
    placeholders and every method shared one name, so only the last survived.)
    """

    @require_cuda
    def test_accelerator_device_state(self):
        accelerator = Accelerator()
        assert PartialState._shared_state["_cpu"] is False
        assert PartialState._shared_state["device"].type == "cuda"
        # Requesting CPU now conflicts with the already-initialized CUDA state.
        with self.assertRaises(ValueError):
            accelerator = Accelerator(cpu=True)

    def test_mutable_states(self):
        accelerator = Accelerator()
        state = GradientState()
        assert state.num_steps == 1
        # GradientState is a singleton: mutations through the accelerator must
        # be visible on an independently constructed handle.
        accelerator.gradient_accumulation_steps = 4
        assert state.num_steps == 4

        assert state.sync_gradients is True
        accelerator.sync_gradients = False
        assert state.sync_gradients is False
        GradientState._reset_state()

    def test_prepared_objects_are_referenced(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        (
            prepared_model,
            prepared_optimizer,
            prepared_scheduler,
            prepared_train_dl,
            prepared_valid_dl,
        ) = accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)

        self.assertTrue(prepared_model in accelerator._models)
        self.assertTrue(prepared_optimizer in accelerator._optimizers)
        self.assertTrue(prepared_scheduler in accelerator._schedulers)
        self.assertTrue(prepared_train_dl in accelerator._dataloaders)
        self.assertTrue(prepared_valid_dl in accelerator._dataloaders)

    def test_free_memory_dereferences_prepared_components(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        accelerator.free_memory()
        self.assertTrue(len(accelerator._models) == 0)
        self.assertTrue(len(accelerator._optimizers) == 0)
        self.assertTrue(len(accelerator._schedulers) == 0)
        self.assertTrue(len(accelerator._dataloaders) == 0)

    def test_env_var_device(self):
        # Start from an uninitialized state so ACCELERATE_TORCH_DEVICE is honored.
        PartialState._reset_state()

        # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
        def noop(*args, **kwargs):
            pass

        with patch('torch.cuda.set_device', noop), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64'):
            accelerator = Accelerator()
            self.assertEqual(str(accelerator.state.device), 'cuda:64')

    def test_save_load_model(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # make sure loaded weights match
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)

    def test_save_load_model_with_hooks(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        accelerator.prepare(model, optimizer, scheduler, train_dl, valid_dl)
        model_signature = get_signature(model)

        # saving hook
        def save_config(models, weights, output_dir):
            config = {'class_name': models[0].__class__.__name__}
            with open(os.path.join(output_dir, 'data.json'), 'w') as f:
                json.dump(config, f)

        # loading hook
        def load_config(models, input_dir):
            with open(os.path.join(input_dir, 'data.json'), 'r') as f:
                config = json.load(f)
            models[0].class_name = config['class_name']

        save_hook = accelerator.register_save_state_pre_hook(save_config)
        load_hook = accelerator.register_load_state_pre_hook(load_config)

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # random class name to verify correct one is loaded
            model.class_name = 'random'

            # make sure loaded weights match with hooks
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)

            # mode.class_name is loaded from config
            self.assertTrue(model.class_name == model.__class__.__name__)

        # remove hooks
        save_hook.remove()
        load_hook.remove()

        with tempfile.TemporaryDirectory() as tmpdirname:
            accelerator.save_state(tmpdirname)

            # make sure random weights don't match with hooks removed
            load_random_weights(model)
            self.assertTrue(abs(model_signature - get_signature(model)) > 1E-3)

            # random class name to verify correct one is loaded
            model.class_name = 'random'

            # make sure loaded weights match with hooks removed
            accelerator.load_state(tmpdirname)
            self.assertTrue(abs(model_signature - get_signature(model)) < 1E-3)

            # mode.class_name is NOT loaded from config
            self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """`prepare` must pass `None` through untouched."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj)
        self.assertTrue(dummy_obj is None)

    def test_is_accelerate_prepared_flag(self):
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        dummy_obj, model, optimizer, scheduler, train_dl, valid_dl = accelerator.prepare(
            dummy_obj, model, optimizer, scheduler, train_dl, valid_dl)
        # A plain list cannot carry attributes, so the flag stays unset on it.
        self.assertEqual(
            getattr(dummy_obj, '_is_accelerate_prepared', False), False, 'Dummy object should have `_is_accelerate_prepared` set to `True`', )
        self.assertEqual(
            getattr(model, '_is_accelerate_prepared', False), True, 'Model is missing `_is_accelerator_prepared` or is set to `False`', )
        self.assertEqual(
            getattr(optimizer, '_is_accelerate_prepared', False), True, 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`', )
        self.assertEqual(
            getattr(scheduler, '_is_accelerate_prepared', False), True, 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`', )
        self.assertEqual(
            getattr(train_dl, '_is_accelerate_prepared', False), True, 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`', )
        self.assertEqual(
            getattr(valid_dl, '_is_accelerate_prepared', False), True, 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`', )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """An 8-bit model fully on GPU 0 should prepare without error."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', load_in_8bit=True, device_map={'': 0}, )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_offload_error(self):
        """Preparing an 8-bit model with CPU-offloaded modules must fail."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m', )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 'cpu'
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True)

        # This should not work and get value error
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_error(self):
        """A multi-device 8-bit model must not be preparable under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {'distributed_type': DistributedType.MULTI_GPU}
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m', )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should not work and get value error
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)
        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_device(self):
        """A multi-device 8-bit model prepares fine without distributed state."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                'EleutherAI/gpt-neo-125m', )
        device_map = infer_auto_device_map(model)
        device_map['lm_head'] = 1
        model = AutoModelForCausalLM.from_pretrained(
            'EleutherAI/gpt-neo-125m', load_in_8bit=True, device_map=device_map, )
        accelerator = Accelerator()

        # This should work
        _ = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(model)
| 597 |
'''simple docstring'''
def greatest_common_divisor(a: int, b: int) -> int:
    """Recursive Euclidean GCD; always returns a non-negative value.

    The original signature repeated one parameter name (a SyntaxError) while
    the body already recursed via `greatest_common_divisor(b % a, a)`,
    grounding both the function name and the (a, b) parameters.
    """
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)
def gcd_by_iterative(x: int, y: int) -> int:
    """Iterative Euclidean GCD; returns a non-negative value.

    Named `gcd_by_iterative` because `main` below calls it by that name.
    """
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)
def main():
    """Read two comma-separated integers and print both GCD variants.

    Malformed input (missing comma, non-numeric) prints 'Wrong input' instead
    of raising.
    """
    try:
        nums = input('Enter two integers separated by comma (,): ').split(',')
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f'''greatest_common_divisor({num_1}, {num_2}) = '''
            f'''{greatest_common_divisor(num_1, num_2)}''')
        print(f'''By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}''')
    except (IndexError, UnboundLocalError, ValueError):
        print('Wrong input')


if __name__ == "__main__":
    main()
| 597 | 1 |
'''simple docstring'''
def is_palindrome(num) -> bool:
    """Return True if the decimal representation of `num` is a palindrome.

    Named `is_palindrome` for consistency with `solution` below, which calls
    it by that name.
    """
    return str(num) == str(num)[::-1]
def sum_reverse(num) -> int:
    """Return `num` plus its decimal reverse (the Lychrel iteration step).

    Named `sum_reverse` for consistency with `solution` below, which calls it
    by that name.
    """
    return int(num) + int(str(num)[::-1])
def solution(limit: int = 10000) -> int:
    """Project Euler 55: count Lychrel candidates below `limit`.

    A number is assumed Lychrel if 50 reverse-and-add iterations never produce
    a palindrome. The while/else appends exactly when the loop exhausts its
    iterations without breaking.
    """
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        current = num
        while iterations < 50:
            current = sum_reverse(current)
            iterations += 1
            if is_palindrome(current):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 720 |
'''simple docstring'''
def UpperCAmelCase__(num: int) -> bool:
    """Return True if `num` is a non-negative decimal palindrome.

    Digit-reversal approach: builds the reversed value arithmetically and
    compares against a saved copy. Negative numbers are rejected up front.
    (Locals restored — the corrupted copy never bound `num_copy`/`rev_num`.)
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 667 | 0 |
def solution() -> str:
    """Project Euler 48: last ten digits of 1^1 + 2^2 + ... + 1000^1000.

    Renamed to `solution` (the __main__ guard calls it by that name) and the
    accumulator restored — the corrupted copy incremented an unbound `total`.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
| 635 | import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
# Module-level logger for the conversion script, with INFO output enabled.
# NOTE(review): the logger is bound to the mangled name `SCREAMING_SNAKE_CASE`
# while later code reads `logger` — verify/restore the intended name.
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__)
def __A ( _A , _A , _A , _A ):
"""simple docstring"""
__a = original_name.split("." )[0]
__a = key.split("." )
__a = int(key_list[key_list.index(_A ) - 2] )
__a = int(key_list[key_list.index(_A ) - 1] )
__a = orig_block_num - offset
__a = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def __A ( _A ):
"""simple docstring"""
__a = OrderedDict()
__a , __a = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
__a = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
__a = key[: key.find("proj" )]
__a = key.replace(_A , f"""patch_embeddings.{total_embed_found}.""" )
__a = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
__a = "poolformer.encoder." + key
if "mlp.fc1" in key:
__a = replace_key_with_offset(_A , _A , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
__a = replace_key_with_offset(_A , _A , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
__a = replace_key_with_offset(_A , _A , "norm1" , "before_norm" )
if "norm2" in key:
__a = replace_key_with_offset(_A , _A , "norm2" , "after_norm" )
if "layer_scale_1" in key:
__a = replace_key_with_offset(_A , _A , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
__a = replace_key_with_offset(_A , _A , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
__a = key.replace("head" , "classifier" )
__a = value
return new_state_dict
def __A ( ):
    """Download the standard COCO validation image used to sanity-check conversions.

    NOTE(review): the source passed a never-bound mangled name as the URL and
    ``stream`` argument; the conventional ``requests.get(url, stream=True)``
    form is restored — verify against the upstream conversion script.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def __A ( _A , _A , _A ):
    """Convert an original PoolFormer checkpoint to the HF format, verify it, and save.

    NOTE(review): identifiers in this function look machine-mangled — every
    local is assigned to `__a`, all three parameters share the name `_A`
    (which is a SyntaxError in Python), and the body then reads names such as
    `model_name`, `size`, `idalabel`, `logits`, `expected_shape`,
    `pytorch_dump_folder_path` that are never bound. The intended positional
    arguments appear to be (model_name, checkpoint_path,
    pytorch_dump_folder_path) — restore consistent names and verify against
    the upstream conversion script before running.
    """
    __a = PoolFormerConfig()
    # set attributes based on model_name
    __a = "huggingface/label-files"
    __a = model_name[-3:]
    __a = 1000
    __a = "imagenet-1k-id2label.json"
    __a = (1, 1000)
    # set config attributes
    __a = json.load(open(hf_hub_download(_A , _A , repo_type="dataset" ) , "r" ) )
    __a = {int(_A ): v for k, v in idalabel.items()}
    __a = idalabel
    __a = {v: k for k, v in idalabel.items()}
    # Per-size architecture hyper-parameters: depths, hidden sizes, MLP ratio,
    # (optionally) layer-scale init, and the image-processor crop percentage.
    if size == "s12":
        __a = [2, 2, 6, 2]
        __a = [64, 128, 320, 512]
        __a = 4.0
        __a = 0.9
    elif size == "s24":
        __a = [4, 4, 12, 4]
        __a = [64, 128, 320, 512]
        __a = 4.0
        __a = 0.9
    elif size == "s36":
        __a = [6, 6, 18, 6]
        __a = [64, 128, 320, 512]
        __a = 4.0
        __a = 1E-6
        __a = 0.9
    elif size == "m36":
        __a = [6, 6, 18, 6]
        __a = [96, 192, 384, 768]
        __a = 4.0
        __a = 1E-6
        __a = 0.95
    elif size == "m48":
        __a = [8, 8, 24, 8]
        __a = [96, 192, 384, 768]
        __a = 4.0
        __a = 1E-6
        __a = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # load image processor
    __a = PoolFormerImageProcessor(crop_pct=_A )
    # Prepare image
    __a = prepare_img()
    __a = image_processor(images=_A , return_tensors="pt" ).pixel_values
    logger.info(f"""Converting model {model_name}...""" )
    # load original state dict
    __a = torch.load(_A , map_location=torch.device("cpu" ) )
    # rename keys
    __a = rename_keys(_A )
    # create HuggingFace model and load state dict
    __a = PoolFormerForImageClassification(_A )
    model.load_state_dict(_A )
    model.eval()
    # Define image processor
    __a = PoolFormerImageProcessor(crop_pct=_A )
    __a = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
    # forward pass
    __a = model(_A )
    __a = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        __a = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        __a = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        __a = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        __a = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        __a = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , _A , atol=1E-2 )
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(_A ).mkdir(exist_ok=_A )
    model.save_pretrained(_A )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(_A )
# CLI entry point for the PoolFormer conversion script.
# NOTE(review): the parser and parsed args are bound to the mangled name
# `SCREAMING_SNAKE_CASE` while the code reads `parser` and `args` — restore
# consistent names before running.
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument(
        """--model_name""",
        default="""poolformer_s12""",
        type=str,
        help="""Name of the model you'd like to convert.""",
    )
    parser.add_argument(
        """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    SCREAMING_SNAKE_CASE : Any = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 197 | 0 |
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
# Sentence delimiter used when flattening references/predictions to characters.
# NOTE(review): bound to a mangled name; the class below reads
# `SENTENCE_DELIMITER`, which is never defined — verify/restore the name.
SCREAMING_SNAKE_CASE : int = ""
# jiwer < 2.3.0 lacks the ReduceTo* transforms, so an equivalent custom
# transform is defined for old versions; newer versions use the built-ins.
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
    class snake_case ( tr.AbstractTransform ):
        """Split sentences into characters, inserting a delimiter between sentences.

        NOTE(review): method bodies read `sentence_delimiter`, `__A`, and
        `chars`, none of which are bound (parameters are named `_lowercase`
        and assignments go to a mangled name) — identifiers look
        machine-mangled; verify against the upstream CER metric.
        """
        def __init__( self, _lowercase = " " ) -> str:
            SCREAMING_SNAKE_CASE_ = sentence_delimiter
        def a__ ( self, _lowercase ) -> Optional[Any]:
            return list(__A )
        def a__ ( self, _lowercase ) -> Any:
            SCREAMING_SNAKE_CASE_ = []
            for sent_idx, sentence in enumerate(__A ):
                chars.extend(self.process_string(__A ) )
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__A ) - 1:
                    chars.append(self.sentence_delimiter )
            return chars
    SCREAMING_SNAKE_CASE : List[str] = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    SCREAMING_SNAKE_CASE : Any = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
SCREAMING_SNAKE_CASE : int = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
SCREAMING_SNAKE_CASE : List[str] = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
SCREAMING_SNAKE_CASE : int = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class snake_case ( datasets.Metric ):
    """Character Error Rate metric backed by jiwer's `compute_measures`.

    NOTE(review): the compute method below declares all three parameters with
    the same mangled name `_lowercase` (a SyntaxError) and then reads
    `concatenate_texts`, `__A`, and `measures`, which are never bound — the
    identifiers look machine-mangled; verify against the upstream CER metric
    before running.
    """
    def a__ ( self ) -> Optional[int]:
        # Declares the metric's input schema and reference links.
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    'predictions': datasets.Value('string', id='sequence' ),
                    'references': datasets.Value('string', id='sequence' ),
                } ), codebase_urls=['https://github.com/jitsi/jiwer/'], reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
                'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
            ], )
    def a__ ( self, _lowercase, _lowercase, _lowercase=False ) -> str:
        # When concatenating, jiwer scores all sentences at once; otherwise the
        # per-pair substitution/deletion/insertion counts are accumulated and
        # the overall error rate returned.
        if concatenate_texts:
            return jiwer.compute_measures(
                __A, __A, truth_transform=__A, hypothesis_transform=__A, )["wer"]
        SCREAMING_SNAKE_CASE_ = 0
        SCREAMING_SNAKE_CASE_ = 0
        for prediction, reference in zip(__A, __A ):
            SCREAMING_SNAKE_CASE_ = jiwer.compute_measures(
                __A, __A, truth_transform=__A, hypothesis_transform=__A, )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of released X-MOD checkpoints to their hosted
# config files.
# NOTE(review): both values are bound to the same mangled name
# `SCREAMING_SNAKE_CASE`, so the second assignment shadows the first —
# verify the intended names (logger / pretrained-config archive map).
SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[int] = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class snake_case ( lowercase_ ):
    """Configuration for X-MOD models (XLM-R-style encoder with language adapters).

    NOTE(review): the source `__init__` declared every parameter with the same
    mangled name `_lowercase` (a SyntaxError in Python) and assigned each
    attribute through one mangled local; the parameter list below is
    reconstructed from the default values that survived in the signature and
    the attribute reads in the body — verify against the upstream X-MOD
    configuration.
    """

    _a = """xmod"""  # model-type identifier (kept under the name the file used)

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # Adapter-specific settings (the X-MOD additions over XLM-R).
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class snake_case ( lowercase_ ):
    """ONNX export configuration for X-MOD (same dynamic axes as other BERT-like encoders)."""

    @property
    def a__ ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for the exported inputs.

        NOTE(review): the source assigned the axis dict to a mangled name and
        then read `dynamic_axis` (NameError); fixed by using a single local.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 238 | 0 |
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
A : int = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class UpperCamelCase( lowerCAmelCase_ ):
    """Agent tool that translates text between two plain-English-named languages
    using NLLB-200.

    NOTE(review): in the source every class attribute and all three methods
    carried the same mangled name, and the first method declared one
    parameter name three times (a SyntaxError). The attribute and method
    names below follow the `PipelineTool` encode/forward/decode contract —
    verify against the base class before relying on them.
    """

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeqaSeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode( self , text , src_lang , tgt_lang ):
        """Validate the language names, map them to NLLB codes, and tokenize."""
        if src_lang not in self.lang_to_code:
            raise ValueError(f'''{src_lang} is not a supported language.''' )
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'''{tgt_lang} is not a supported language.''' )
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text , return_tensors="pt" , src_lang=src_lang , tgt_lang=tgt_lang )

    def forward( self , inputs ):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**inputs )

    def decode( self , outputs ):
        """Decode the generated token ids back to text."""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=True )
| 371 |
'''simple docstring'''
from torch import nn
def __lowercase (_lowercase ) -> Union[str, Any]:
"""simple docstring"""
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f"Unsupported activation function: {act_fn}" )
| 150 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the SwiftFormer sub-package: declare the import
# structure, add the torch-only symbols when torch is available, and install
# a _LazyModule otherwise.
# NOTE(review): the import-structure dict and symbol list are bound to the
# mangled name `_snake_case`, while `_LazyModule` below is handed
# `_import_structure` — verify/restore the intended name.
_snake_case : Optional[Any] = {
    """configuration_swiftformer""": [
        """SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """SwiftFormerConfig""",
        """SwiftFormerOnnxConfig""",
    ]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _snake_case : Dict = [
        """SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """SwiftFormerForImageClassification""",
        """SwiftFormerModel""",
        """SwiftFormerPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Real imports for type checkers only.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys
_snake_case : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 706 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowerCAmelCase ( __UpperCAmelCase ):
    """DistilBERT tokenization tests reusing the shared BERT tokenization suite.

    NOTE(review): the slow test's locals are all assigned to one mangled name
    while the asserts read `tokenizer`, `text`, `text_a`, `encoded_sentence`,
    `encoded_pair` (never bound), and `add_special_tokens=UpperCamelCase`
    passes an unbound name — identifiers look machine-mangled; verify against
    the upstream test before running.
    """
    a : Dict = DistilBertTokenizer
    a : Tuple = DistilBertTokenizerFast
    a : List[str] = True
    @slow
    def lowercase ( self ):
        # Round-trips two sequences through build_inputs_with_special_tokens and
        # checks the [CLS]/[SEP] framing for single and paired inputs.
        _SCREAMING_SNAKE_CASE = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" )
        _SCREAMING_SNAKE_CASE = tokenizer.encode("sequence builders" , add_special_tokens=UpperCamelCase )
        _SCREAMING_SNAKE_CASE = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCamelCase )
        _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCamelCase )
        _SCREAMING_SNAKE_CASE = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase )
        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
] | 493 | 0 |
'''simple docstring'''
def _UpperCAmelCase ( __A : list ):
a_ : List[str] = 0
while len(__A ) > 1:
a_ : List[Any] = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
a_ : Any = files.index(min(__A ) )
temp += files[min_index]
files.pop(__A )
files.append(__A )
optimal_merge_cost += temp
return optimal_merge_cost
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 466 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
# NOTE(review): both sentinels are bound to the same mangled name
# `__lowerCAmelCase`, so the second shadows the first; later code reads
# `_unmatched` — verify/restore the intended names (_unmatched / empty_dict).
__lowerCAmelCase = object()
# For specifying empty leaf dict `{}`
__lowerCAmelCase = object()
def _UpperCAmelCase ( __A : List[str] , __A : Tuple ):
a_ : List[Any] = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(__A ) - len(__A ) + 1 ):
a_ : Union[str, Any] = [x.match(__A ) for x, y in zip(__A , ks[i:] )]
if matches and all(__A ):
return True
return False
def _UpperCAmelCase ( __A : List[str] ):
def replace(__A : int , __A : Union[str, Any] ):
for rule, replacement in rules:
if _match(__A , __A ):
return replacement
return val
return replace
def _UpperCAmelCase ( ):
    """Static table mapping GPT-style parameter paths to model-parallel
    PartitionSpecs (``"mp"`` marks the sharded axis).

    NOTE(review): the source body read an undefined mangled name where the
    replicated axis belongs; ``None`` (replicate that axis) is the
    conventional value and is restored here — verify against the original
    model-parallel utilities.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('''mp''' , None )),
        (("transformer", "wte", "embedding"), P('''mp''' , None )),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , '''mp''' )),
        (("attention", "out_proj", "kernel"), P('''mp''' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , '''mp''' )),
        (("mlp", "c_fc", "bias"), P('''mp''' )),
        (("mlp", "c_proj", "kernel"), P('''mp''' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def _UpperCAmelCase ( in_dict ):
    """Resolve a PartitionSpec for every parameter in a nested parameter dict.

    Flattens the tree, applies the rule table, asserts every parameter was
    matched, and returns a frozen nested dict of specs.

    NOTE(review): locals were all assigned to one mangled name while the body
    read ``rules``/``replace``/``initd``/``result``; they are restored here.
    The helpers ``_get_partition_rules``/``_replacement_rules`` and the
    ``_unmatched`` sentinel must exist at module level — verify, since those
    definitions also carry mangled names in this file.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
| 466 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the (deprecated) M-CTC-T sub-package.
# NOTE(review): the import-structure dict and symbol list are bound to the
# mangled name `UpperCamelCase`, while `_LazyModule` below is handed
# `_import_structure` — verify/restore the intended name.
UpperCamelCase : Optional[Any] = {
    """configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
    """feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
    """processing_mctct""": ["""MCTCTProcessor"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase : Dict = [
        """MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MCTCTForCTC""",
        """MCTCTModel""",
        """MCTCTPreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Real imports for type checkers only.
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
    import sys
    UpperCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 610 | '''simple docstring'''
import os
def SCREAMING_SNAKE_CASE__ ( ) -> str:
    """Project Euler 13: first ten digits of the sum of the integers in num.txt.

    Reads ``num.txt`` (one integer per line) from the directory containing
    this module.

    NOTE(review): the source passed a never-bound mangled name to every call
    (path join, open, int); the ``file_path``/``file_hand``/``line`` locals
    are restored here. ``abspath`` guards against an empty dirname when the
    module path is relative.
    """
    file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
# Script entry point.
# NOTE(review): `solution` is not the name the function above was given in
# this name-mangled file — verify before running as a script.
if __name__ == "__main__":
    print(solution())
| 610 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCAmelCase__ ( unittest.TestCase ):
    """Tokenizer-loading robustness tests: offline/cached behaviour under a
    mocked 500 response, plus two deprecated legacy-loading paths.

    NOTE(review): the test bodies assign every local to the mangled name `A`
    and then read `__UpperCamelCase` (the mocked response / temp path), which
    is never bound, and all four methods share one mangled name so only the
    last survives on the class — verify against the upstream test module
    before running.
    """
    def __UpperCamelCase ( self : Any ) -> Tuple:
        # A mock response for an HTTP head request to emulate server down
        A = mock.Mock()
        A = 500
        A = {}
        A = HTTPError
        A = {}
        # Download this model to make sure it's in the cache.
        A = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head:
            A = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert' )
            # This check we did call the fake head request
            mock_head.assert_called()
    @require_tokenizers
    def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        # A mock response for an HTTP head request to emulate server down
        A = mock.Mock()
        A = 500
        A = {}
        A = HTTPError
        A = {}
        # Download this model to make sure it's in the cache.
        A = GPTaTokenizerFast.from_pretrained('gpt2' )
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=__UpperCamelCase ) as mock_head:
            A = GPTaTokenizerFast.from_pretrained('gpt2' )
            # This check we did call the fake head request
            mock_head.assert_called()
    def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
        # This test is for deprecated behavior and can be removed in v5
        try:
            A = tempfile.mktemp()
            with open(__UpperCamelCase , 'wb' ) as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , __UpperCamelCase )
            A = AlbertTokenizer.from_pretrained(__UpperCamelCase )
        finally:
            os.remove(__UpperCamelCase )
        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json' ):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json' , 'wb' ) as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , __UpperCamelCase )
            A = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size , 1_000 )
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json' )
    def __UpperCamelCase ( self : str ) -> int:
        # This test is for deprecated behavior and can be removed in v5
        A = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    """Staging-endpoint tests for pushing tokenizers to the Hugging Face Hub.

    Reconstructed from a mangled block in which every method shared the name
    ``__UpperCamelCase`` (so the setUp/tearDown hooks never ran and only one
    test survived) and locals were assigned to ``A`` but referenced through
    unresolvable placeholders.
    """

    # Tiny vocabulary used to build throwaway BERT-style tokenizers.
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        # Authenticate against the staging endpoint once for the whole class.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos the tests create.
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        """push_to_hub and save_pretrained(..., push_to_hub=True) round-trip."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        """Same round-trip, pushing into an organization namespace."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        """Custom (trust_remote_code) tokenizers, slow then slow+fast."""
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    """Unit tests for the tokenization ``Trie``.

    Reconstructed from a mangled block: every method collided on the name
    ``__UpperCamelCase`` and each body assigned ``A = Trie()`` but then used an
    undefined ``trie`` local, so none of these tests could ever run.
    """

    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"])
'''simple docstring'''
import os
lowerCAmelCase :Dict = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string (e.g. ``"XIV"``) to its integer value.

    Uses the subtractive rule: a symbol smaller than its successor is
    subtracted (IV = 4), otherwise added. Renamed from the mangled
    ``lowerCamelCase`` so the call in ``solution`` resolves.

    Args:
        numerals: roman numeral made of the characters in ``SYMBOLS``.

    Returns:
        The integer value; 0 for an empty string.
    """
    if not numerals:
        return 0
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            # Subtractive pair such as IV, IX, XL, ...
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    # The last symbol is always added.
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal-length roman numeral for ``num``.

    Handles thousands, hundreds, tens and units in turn, emitting the
    subtractive forms (CM, CD, XC, XL, IX, IV) where they shorten the result.
    Renamed from the mangled ``lowerCamelCase`` so the call in ``solution``
    resolves.

    Args:
        num: non-negative integer (0 yields the empty string).

    Returns:
        The shortest standard roman-numeral representation.
    """
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Project Euler 89: characters saved by rewriting numerals minimally.

    Reads one roman numeral per line from the data file located next to this
    script, re-generates each in minimal form, and sums the length savings.
    The original block called undefined names (the parse/generate helpers were
    mangled to ``lowerCamelCase``) and took ``dirname`` of the filename
    argument instead of ``__file__``.

    Args:
        roman_numerals_filename: path of the data file, relative to this script
            (leading slash included, matching the default).

    Returns:
        Total number of characters saved.
    """
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as in_file:
        lines = in_file.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the RoBERTa sub-package. The original block
# rebound a single mangled name (`UpperCAmelCase__`) for every entry, losing
# all but the last list, and then referenced an undefined `_import_structure`
# when building the lazy module — the module could never import.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def set_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` set to 1.

    Renamed from the colliding ``_A`` (five functions shared that name, so all
    but the last were lost) and given a doctest for the module's
    ``doctest.testmod()`` run.

    >>> set_bit(0b1101, 1)
    15
    >>> set_bit(0b0, 5)
    32
    """
    return number | (1 << position)
def clear_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` cleared to 0.

    >>> clear_bit(0b10010, 1)
    16
    >>> clear_bit(0b1101, 1)
    13
    """
    return number & ~(1 << position)
def flip_bit(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` flipped.

    >>> flip_bit(0b101, 1)
    7
    >>> flip_bit(0b111, 1)
    5
    """
    return number ^ (1 << position)
def is_bit_set(number: int, position: int) -> bool:
    """Return ``True`` if the bit at ``position`` in ``number`` is 1.

    >>> is_bit_set(0b1010, 1)
    True
    >>> is_bit_set(0b1010, 2)
    False
    """
    return ((number >> position) & 1) == 1
def get_bit(number: int, position: int) -> int:
    """Return the bit (0 or 1) at ``position`` in ``number``.

    >>> get_bit(0b1010, 1)
    1
    >>> get_bit(0b1010, 2)
    0
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
    # Run the doctests embedded in the bit-manipulation functions above.
    # (A stray data-separator line that followed this guard was a SyntaxError
    # and has been removed.)
    import doctest

    doctest.testmod()
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Optional[int] = logging.get_logger(__name__)
set_seed(770)
# Mapping of suno/bark checkpoint key fragments -> HF Bark layer-name fragments,
# applied when fixing up the raw state dict during conversion.
# NOTE(review): every constant below is bound to the same mangled name
# `lowerCAmelCase_`, so each assignment overwrites the previous one; the
# conversion functions reference the canonical names (new_layer_name_dict,
# REMOTE_MODEL_PATHS, CUR_PATH, default_cache_dir, CACHE_DIR) — confirm the
# intended bindings against the upstream conversion script.
lowerCAmelCase_ : Dict = {
    '''c_attn''': '''att_proj''',
    '''c_proj''': '''out_proj''',
    '''c_fc''': '''in_proj''',
    '''transformer.''': '''''',
    '''h.''': '''layers.''',
    '''ln_1''': '''layernorm_1''',
    '''ln_2''': '''layernorm_2''',
    '''ln_f''': '''layernorm_final''',
    '''wpe''': '''position_embeds_layer''',
    '''wte''': '''input_embeds_layer''',
}
# Remote locations of the six bark checkpoints (small/large x text/coarse/fine).
lowerCAmelCase_ : int = {
    '''text_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text.pt''',
    },
    '''coarse_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse.pt''',
    },
    '''fine_small''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine.pt''',
    },
    '''text''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''text_2.pt''',
    },
    '''coarse''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''coarse_2.pt''',
    },
    '''fine''': {
        '''repo_id''': '''suno/bark''',
        '''file_name''': '''fine_2.pt''',
    },
}
# Directory of this script.
lowerCAmelCase_ : List[str] = os.path.dirname(os.path.abspath(__file__))
# User cache root (~/.cache), used as the XDG fallback below.
lowerCAmelCase_ : List[Any] = os.path.join(os.path.expanduser('~'), '.cache')
# Checkpoint cache directory: $XDG_CACHE_HOME/suno/bark_v0 (or ~/.cache/...).
# NOTE(review): `default_cache_dir` is undefined here — it presumably referred
# to the previous constant before the names were mangled.
lowerCAmelCase_ : Any = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _get_ckpt_path(model_type, use_small=False):
    """Return the local cache path of the requested bark checkpoint.

    Reconstructed: the original declared the parameter name twice (a
    SyntaxError) and joined against an undefined ``__A``; callers in this file
    invoke it as ``_get_ckpt_path(ckpt_path, use_small=...)``.

    Args:
        model_type: one of "text", "coarse", "fine".
        use_small: select the small checkpoint variant.

    Returns:
        Path under ``CACHE_DIR`` for the checkpoint file.
    """
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])
def _download(from_hub, file_name):
    """Download one bark checkpoint file from the Hub into ``CACHE_DIR``.

    Reconstructed: the original declared the parameter name twice (a
    SyntaxError) and passed an undefined ``__A`` everywhere.

    Args:
        from_hub: Hub repo id (e.g. "suno/bark").
        file_name: checkpoint file name within the repo.
    """
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hub, filename=file_name, local_dir=CACHE_DIR)
# Load a raw suno/bark checkpoint and convert it into the matching HF sub-model
# (BarkSemanticModel / BarkCoarseModel / BarkFineModel).
# NOTE(review): this block is mangled — the parameter list repeats one name four
# times (a SyntaxError as written; presumably ``ckpt_path, device,
# use_small=False, model_type="text"``), locals are all bound to ``a_`` while
# the body reads the intended names (ModelClass, model_key, checkpoint, ...),
# and ``__A`` stands in for several different original arguments. Confirm each
# placeholder against the upstream bark conversion script before running.
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Any="text" ):
    """Convert one bark checkpoint (text/coarse/fine) into an HF Bark sub-model."""
    # Pick the model / config / generation-config classes for this sub-model.
    if model_type == "text":
        a_ : Dict = BarkSemanticModel
        a_ : Optional[Any] = BarkSemanticConfig
        a_ : Dict = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        a_ : Optional[Any] = BarkCoarseModel
        a_ : Any = BarkCoarseConfig
        a_ : List[Any] = BarkCoarseGenerationConfig
    elif model_type == "fine":
        a_ : Dict = BarkFineModel
        a_ : Any = BarkFineConfig
        a_ : Any = BarkFineGenerationConfig
    else:
        raise NotImplementedError()
    a_ : Tuple = F"{model_type}_small" if use_small else model_type
    a_ : List[Any] = REMOTE_MODEL_PATHS[model_key]
    # Download the checkpoint on first use.
    if not os.path.exists(__A ):
        logger.info(F"{model_type} model not found, downloading into `{CACHE_DIR}`." )
        _download(model_info["""repo_id"""] , model_info["""file_name"""] )
    a_ : List[str] = torch.load(__A , map_location=__A )
    # this is a hack
    a_ : Optional[Any] = checkpoint["""model_args"""]
    if "input_vocab_size" not in model_args:
        a_ : Tuple = model_args["""vocab_size"""]
        a_ : Tuple = model_args["""vocab_size"""]
        del model_args["vocab_size"]
    # convert Bark model arguments to HF Bark model arguments
    a_ : Any = model_args.pop("""n_head""" )
    a_ : int = model_args.pop("""n_embd""" )
    a_ : List[Any] = model_args.pop("""n_layer""" )
    a_ : List[Any] = ConfigClass(**checkpoint["""model_args"""] )
    a_ : Tuple = ModelClass(config=__A )
    a_ : str = GenerationConfigClass()
    a_ : Optional[int] = model_generation_config
    a_ : Optional[int] = checkpoint["""model"""]
    # fixup checkpoint
    a_ : str = """_orig_mod."""
    for k, v in list(state_dict.items() ):
        if k.startswith(__A ):
            # replace part of the key with corresponding layer name in HF implementation
            a_ : Dict = k[len(__A ) :]
            for old_layer_name in new_layer_name_dict:
                a_ : Optional[Any] = new_k.replace(__A , new_layer_name_dict[old_layer_name] )
            a_ : Union[str, Any] = state_dict.pop(__A )
    # Keys present in the checkpoint but not the HF model (and vice versa),
    # ignoring the non-persistent attention-bias buffers.
    a_ : str = set(state_dict.keys() ) - set(model.state_dict().keys() )
    a_ : Optional[int] = {k for k in extra_keys if not k.endswith(""".attn.bias""" )}
    a_ : Optional[Any] = set(model.state_dict().keys() ) - set(state_dict.keys() )
    a_ : List[str] = {k for k in missing_keys if not k.endswith(""".attn.bias""" )}
    if len(__A ) != 0:
        raise ValueError(F"extra keys found: {extra_keys}" )
    if len(__A ) != 0:
        raise ValueError(F"missing keys: {missing_keys}" )
    model.load_state_dict(__A , strict=__A )
    a_ : Union[str, Any] = model.num_parameters(exclude_embeddings=__A )
    a_ : int = checkpoint["""best_val_loss"""].item()
    logger.info(F"model loaded: {round(n_params/1E6 , 1 )}M params, {round(__A , 3 )} loss" )
    model.eval()
    model.to(__A )
    del checkpoint, state_dict
    return model
# Convert a bark sub-model, sanity-check it against the original bark
# implementation on random inputs, and save it with save_pretrained.
# NOTE(review): mangled block — the parameter list repeats one name (a
# SyntaxError as written; presumably ``pytorch_dump_folder_path,
# use_small=False, model_type="text"``), locals are bound to ``a_`` while the
# body reads the intended names, and ``__A`` stands in for several different
# arguments. Confirm against the upstream conversion script.
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Any="text" ):
    """Convert, verify and dump one bark sub-model."""
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()
    a_ : List[str] = """cpu""" # do conversion on cpu
    a_ : Dict = _get_ckpt_path(__A , use_small=__A )
    a_ : Tuple = _load_model(__A , __A , model_type=__A , use_small=__A )
    # load bark initial model
    a_ : int = _bark_load_model(__A , """cpu""" , model_type=__A , use_small=__A )
    if model_type == "text":
        a_ : Union[str, Any] = bark_model["""model"""]
    if model.num_parameters(exclude_embeddings=__A ) != bark_model.get_num_params():
        raise ValueError("""initial and new models don't have the same number of parameters""" )
    # check if same output as the bark model
    a_ : str = 5
    a_ : Dict = 10
    if model_type in ["text", "coarse"]:
        # 2-D token input for the semantic/coarse models.
        a_ : Optional[Any] = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
        a_ : Optional[int] = bark_model(__A )[0]
        a_ : Optional[Any] = model(__A )
        # take last logits
        a_ : Optional[int] = output_new_model_total.logits[:, [-1], :]
    else:
        # 3-D (batch, seq, codebooks) input for the fine model.
        a_ : int = 3
        a_ : Tuple = 8
        a_ : int = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
        a_ : List[Any] = model(__A , __A )
        a_ : Optional[Any] = bark_model(__A , __A )
        a_ : str = output_new_model_total.logits
    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("""initial and new outputs don't have the same shape""" )
    if (output_new_model - output_old_model).abs().max().item() > 1E-3:
        raise ValueError("""initial and new outputs are not equal""" )
    Path(__A ).mkdir(exist_ok=__A )
    model.save_pretrained(__A )
# Assemble the three converted sub-models plus the Encodec codec into one
# composite BarkModel and push/save it.
# NOTE(review): mangled block — the parameter list repeats one name six times
# (a SyntaxError as written; presumably ``semantic_path, coarse_path,
# fine_path, append_text, hub_path, folder_path``), locals are bound to ``a_``
# while the body reads the intended names (semantic, coarseAcoustic, ...), and
# ``__A`` stands in for several different arguments. Confirm against the
# upstream conversion script.
def _SCREAMING_SNAKE_CASE ( UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , ):
    """Combine converted bark sub-models into a full BarkModel and save it."""
    a_ : Optional[int] = os.path.join(__A , __A )
    # Sub-model configs are read from the previously dumped directories.
    a_ : List[str] = BarkSemanticConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
    a_ : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
    a_ : str = BarkFineConfig.from_pretrained(os.path.join(__A , """config.json""" ) )
    a_ : List[Any] = EncodecConfig.from_pretrained("""facebook/encodec_24khz""" )
    a_ : List[str] = BarkSemanticModel.from_pretrained(__A )
    a_ : List[Any] = BarkCoarseModel.from_pretrained(__A )
    a_ : str = BarkFineModel.from_pretrained(__A )
    a_ : Dict = EncodecModel.from_pretrained("""facebook/encodec_24khz""" )
    # Build the composite config / generation config from the sub-models.
    a_ : Tuple = BarkConfig.from_sub_model_configs(
        __A , __A , __A , __A )
    a_ : Union[str, Any] = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    a_ : str = BarkModel(__A )
    # Attach the sub-models to the composite model.
    a_ : int = semantic
    a_ : Optional[Any] = coarseAcoustic
    a_ : Dict = fineAcoustic
    a_ : int = codec
    a_ : Dict = bark_generation_config
    Path(__A ).mkdir(exist_ok=__A )
    bark.save_pretrained(__A , repo_id=__A , push_to_hub=__A )
if __name__ == "__main__":
    # CLI entry point: python <script> <model_type> <dump_dir> [--is_small].
    # (A stray data-separator line that followed this guard was a SyntaxError
    # and has been removed; the two locals previously shared one mangled name.)
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("model_type", type=str, help="text, coarse or fine.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")

    args = parser.parse_args()

    # NOTE(review): `load_model` must resolve to the converter defined above;
    # in this file that definition's name has been mangled — confirm.
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__lowercase : Any = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification.

    Reconstructed: the original passed an undefined ``frozen=snake_case`` and
    named the class ``_A`` while ``_create_examples`` below constructs
    ``InputExample(guid=..., text_a=..., text_b=..., label=..., pairID=...)``.

    Attributes:
        guid: Unique id for the example.
        text_a: Untokenized text of the first sequence.
        text_b: Untokenized text of the second sequence (sequence-pair tasks).
        label: Label of the example (train/dev only).
        pairID: Unique identifier for the pair of sentences.
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data; field names mirror model inputs.

    Reconstructed: the original passed an undefined ``frozen=snake_case`` and
    named the class ``_A`` while the conversion helper below constructs
    ``InputFeatures(**inputs, label=..., pairID=...)``.

    Attributes:
        input_ids: Token indices of the sequence.
        attention_mask: Mask to avoid attention on padding tokens.
        token_type_ids: Segment token indices.
        label: Classification label (int) or regression target (float).
        pairID: Unique identifier for the pair of sentences.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
# PyTorch dataset for HANS, caching tokenized features on disk.
# NOTE(review): mangled block — the base ``snake_case`` is undefined
# (presumably ``torch.utils.data.Dataset``), every method shares the mangled
# name pattern, the ``__init__`` parameter list repeats ``SCREAMING_SNAKE_CASE_``
# (a SyntaxError as written; presumably ``data_dir, tokenizer, task,
# max_seq_length=None, overwrite_cache=False, evaluate=False``), and locals
# bound to ``snake_case`` are read through their intended names. Confirm
# against the upstream HANS processor before running.
class _A ( snake_case ):
    """Torch dataset yielding ``InputFeatures`` for the HANS train/dev splits."""
    # Cached list of tokenized examples.
    __lowerCamelCase : List[InputFeatures]
    def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = None ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
        """Load features from the on-disk cache, or build and cache them."""
        snake_case : str = hans_processors[task]()
        # Cache file name encodes split, tokenizer class, max length and task.
        snake_case : str = os.path.join(
            SCREAMING_SNAKE_CASE_ ,"""cached_{}_{}_{}_{}""".format(
                """dev""" if evaluate else """train""" ,tokenizer.__class__.__name__ ,str(SCREAMING_SNAKE_CASE_ ) ,SCREAMING_SNAKE_CASE_ ,) ,)
        snake_case : Dict = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            snake_case , snake_case : List[Any] = label_list[2], label_list[1]
        snake_case : List[Any] = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        snake_case : Any = cached_features_file + """.lock"""
        with FileLock(SCREAMING_SNAKE_CASE_ ):
            if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not overwrite_cache:
                logger.info(F"""Loading features from cached file {cached_features_file}""" )
                snake_case : int = torch.load(SCREAMING_SNAKE_CASE_ )
            else:
                logger.info(F"""Creating features from dataset file at {data_dir}""" )
                snake_case : Union[str, Any] = (
                    processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
                )
                logger.info("""Training examples: %s""" ,len(SCREAMING_SNAKE_CASE_ ) )
                snake_case : Dict = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
                logger.info("""Saving features into cached file %s""" ,SCREAMING_SNAKE_CASE_ )
                torch.save(self.features ,SCREAMING_SNAKE_CASE_ )
    def __len__( self ):
        """Number of cached feature rows."""
        return len(self.features )
    def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
        """Return the i-th ``InputFeatures``."""
        return self.features[i]
    def snake_case_ ( self ):
        """Return the (possibly RoBERTa-swapped) label list."""
        return self.label_list
if is_tf_available():
import tensorflow as tf
# TensorFlow dataset for HANS, built from a generator over tokenized features.
# NOTE(review): mangled block — the ``__init__`` parameter list repeats
# ``SCREAMING_SNAKE_CASE_`` (a SyntaxError as written; presumably
# ``data_dir, tokenizer, task, max_seq_length=128, overwrite_cache=False,
# evaluate=False``) and locals bound to ``snake_case`` are read through their
# intended names. Confirm against the upstream HANS processor before running.
class _A :
    """TF dataset yielding (features dict, label) pairs for HANS."""
    # Tokenized examples backing the tf.data pipeline.
    __lowerCamelCase : List[InputFeatures]
    def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = 128 ,SCREAMING_SNAKE_CASE_=False ,SCREAMING_SNAKE_CASE_ = False ,):
        """Tokenize the requested split and wrap it in a tf.data.Dataset."""
        snake_case : Any = hans_processors[task]()
        snake_case : List[str] = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            snake_case , snake_case : int = label_list[2], label_list[1]
        snake_case : List[str] = label_list
        snake_case : int = processor.get_dev_examples(SCREAMING_SNAKE_CASE_ ) if evaluate else processor.get_train_examples(SCREAMING_SNAKE_CASE_ )
        snake_case : Any = hans_convert_examples_to_features(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
        def gen():
            # Stream (inputs, label) tuples for tf.data.Dataset.from_generator.
            for ex_index, ex in tqdm.tqdm(enumerate(self.features ) ,desc="""convert examples to features""" ):
                if ex_index % 10000 == 0:
                    logger.info("""Writing example %d of %d""" % (ex_index, len(SCREAMING_SNAKE_CASE_ )) )
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )
        snake_case : Any = tf.data.Dataset.from_generator(
            SCREAMING_SNAKE_CASE_ ,(
                {
                    """example_id""": tf.intaa,
                    """input_ids""": tf.intaa,
                    """attention_mask""": tf.intaa,
                    """token_type_ids""": tf.intaa,
                },
                tf.intaa,
            ) ,(
                {
                    """example_id""": tf.TensorShape([] ),
                    """input_ids""": tf.TensorShape([None, None] ),
                    """attention_mask""": tf.TensorShape([None, None] ),
                    """token_type_ids""": tf.TensorShape([None, None] ),
                },
                tf.TensorShape([] ),
            ) ,)
    def snake_case_ ( self ):
        """Return the underlying tf.data.Dataset."""
        return self.dataset
    def __len__( self ):
        """Number of tokenized feature rows."""
        return len(self.features )
    def __getitem__( self ,SCREAMING_SNAKE_CASE_ ):
        """Return the i-th ``InputFeatures``."""
        return self.features[i]
    def snake_case_ ( self ):
        """Return the (possibly RoBERTa-swapped) label list."""
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set.

    Reconstructed: the class was named ``_A`` (shadowed by its siblings) while
    the ``hans_processors`` mapping below references ``HansProcessor``; method
    names and locals were mangled.
    """

    def get_train_examples(self, data_dir):
        """See base class: read the TSV training split from ``data_dir``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class: read the TSV evaluation split from ``data_dir``."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class: the three MNLI-style labels."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Create ``InputExample`` objects from TSV rows (header skipped)."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                # First row is the header.
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # Strip the "ex" prefix some pair ids carry.
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Tokenize ``examples`` into a list of ``InputFeatures``.

    Reconstructed: the original declared four parameters all named ``__A`` (a
    SyntaxError) and was bound to ``lowercase`` while both dataset classes call
    ``hans_convert_examples_to_features``.

    Args:
        examples: examples to convert.
        label_list: valid labels; order defines the label ids.
        max_length: maximum sequence length for padding/truncation.
        tokenizer: tokenizer used to encode the text pairs.

    Returns:
        List of ``InputFeatures`` mirroring ``examples``.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        # Unknown labels (e.g. HANS's non-entailment grouping) fall back to 0.
        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
# Maps task name -> number of labels; HANS uses the three MNLI labels.
__lowercase : Dict = {
    '''hans''': 3,
}
# Maps task name -> processor class.
# NOTE(review): both dicts are bound to the same mangled name `__lowercase`
# (the second overwrites the first), the dataset classes above look up
# `hans_processors[task]`, and `HansProcessor` must resolve to the processor
# class defined above — confirm the canonical bindings (hans_tasks_num_labels,
# hans_processors) against the upstream HANS module.
__lowercase : Union[str, Any] = {
    '''hans''': HansProcessor,
}
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
A = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE ( __snake_case ):
    """Image processor that resizes (shortest edge scaled by 256/224),
    center-crops, rescales and normalizes images into ``pixel_values``.

    NOTE(review): the base class ``__snake_case`` is an unresolved name here —
    presumably ``BaseImageProcessor`` (imported above); confirm. The
    ``preprocess`` method continues beyond this excerpt.
    """
    # Names of the inputs this processor produces for the model.
    __A = ["""pixel_values"""]
def __init__( self , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = True , __UpperCamelCase = None , __UpperCamelCase = True , __UpperCamelCase = 1 / 2_55 , __UpperCamelCase = True , __UpperCamelCase = IMAGENET_DEFAULT_MEAN , __UpperCamelCase = IMAGENET_DEFAULT_STD , **__UpperCamelCase , ):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
snake_case_ = size if size is not None else {'shortest_edge': 2_24}
snake_case_ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
snake_case_ = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
snake_case_ = get_size_dict(__UpperCamelCase , param_name='crop_size' )
snake_case_ = do_resize
snake_case_ = size
snake_case_ = resample
snake_case_ = do_center_crop
snake_case_ = crop_size
snake_case_ = do_rescale
snake_case_ = rescale_factor
snake_case_ = do_normalize
snake_case_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = PILImageResampling.BICUBIC , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
snake_case_ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
snake_case_ = int((2_56 / 2_24) * size['shortest_edge'] )
snake_case_ = get_resize_output_image_size(__UpperCamelCase , size=__UpperCamelCase , default_to_square=__UpperCamelCase )
snake_case_ = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
__UpperCamelCase , size=(size_dict['height'], size_dict['width']) , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
snake_case_ = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size['height'], size['width']) , data_format=__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = None , **__UpperCamelCase , ):
"""simple docstring"""
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = ChannelDimension.FIRST , **__UpperCamelCase , ):
"""simple docstring"""
snake_case_ = do_resize if do_resize is not None else self.do_resize
snake_case_ = resample if resample is not None else self.resample
snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ = image_mean if image_mean is not None else self.image_mean
snake_case_ = image_std if image_std is not None else self.image_std
snake_case_ = size if size is not None else self.size
snake_case_ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
snake_case_ = crop_size if crop_size is not None else self.crop_size
snake_case_ = get_size_dict(__UpperCamelCase , param_name='crop_size' )
snake_case_ = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
snake_case_ = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
snake_case_ = [self.resize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
if do_center_crop:
snake_case_ = [self.center_crop(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_rescale:
snake_case_ = [self.rescale(__UpperCamelCase , __UpperCamelCase ) for image in images]
if do_normalize:
snake_case_ = [self.normalize(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for image in images]
snake_case_ = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
snake_case_ = {'pixel_values': images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
| 46 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
__A = LEDConfig
__A = {}
__A = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=20 , __UpperCamelCase=2 , __UpperCamelCase=1 , __UpperCamelCase=0 , __UpperCamelCase=4 , ):
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
snake_case_ = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
snake_case_ = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
snake_case_ = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
snake_case_ = prepare_led_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
snake_case_ = tf.concat(
[tf.zeros_like(__UpperCamelCase )[:, :-1], tf.ones_like(__UpperCamelCase )[:, -1:]] , axis=-1 , )
snake_case_ = global_attention_mask
return config, inputs_dict
def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
snake_case_ = TFLEDModel(config=__UpperCamelCase ).get_decoder()
snake_case_ = inputs_dict['input_ids']
snake_case_ = input_ids[:1, :]
snake_case_ = inputs_dict['attention_mask'][:1, :]
snake_case_ = 1
# first forward pass
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , use_cache=__UpperCamelCase )
snake_case_ , snake_case_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
snake_case_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
snake_case_ = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
snake_case_ = model(__UpperCamelCase , attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case_ = output_from_no_past[:, -3:, random_slice_idx]
snake_case_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__UpperCamelCase , __UpperCamelCase , rtol=1E-3 )
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__=None , lowercase__=None , lowercase__=None , lowercase__=None , ):
'''simple docstring'''
if attention_mask is None:
snake_case_ = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
snake_case_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
snake_case_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
snake_case_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class SCREAMING_SNAKE_CASE ( __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
__A = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__A = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__A = (
{
"""conversational""": TFLEDForConditionalGeneration,
"""feature-extraction""": TFLEDModel,
"""summarization""": TFLEDForConditionalGeneration,
"""text2text-generation""": TFLEDForConditionalGeneration,
"""translation""": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A = True
__A = False
__A = False
__A = False
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFLEDModelTester(self )
snake_case_ = ConfigTester(self , config_class=__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__UpperCamelCase )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = tf.zeros_like(inputs_dict['attention_mask'] )
snake_case_ = 2
snake_case_ = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['global_attention_mask'] , )
snake_case_ = True
snake_case_ = self.model_tester.seq_length
snake_case_ = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__UpperCamelCase ):
snake_case_ = outputs.decoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__UpperCamelCase ):
snake_case_ = [t.numpy() for t in outputs.encoder_attentions]
snake_case_ = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
snake_case_ = len(__UpperCamelCase )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
if self.is_encoder_decoder:
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_decoder_attentions_output(__UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
# Check attention is always last and order is fine
snake_case_ = True
snake_case_ = True
snake_case_ = model_class(__UpperCamelCase )
snake_case_ = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
@unittest.skip('LED keeps using potentially symbolic tensors in conditionals and breaks tracing.' )
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def __lowerCAmelCase ( self ):
"""simple docstring"""
pass
def a(lowercase__ ):
'''simple docstring'''
return tf.constant(lowercase__ , dtype=tf.intaa )
A = 1e-4
@slow
@require_tf
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' ).led
# change to intended input here
snake_case_ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = prepare_led_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
snake_case_ = model(**__UpperCamelCase )[0]
snake_case_ = (1, 10_24, 7_68)
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-3 )
def __lowerCAmelCase ( self ):
"""simple docstring"""
snake_case_ = TFLEDForConditionalGeneration.from_pretrained('allenai/led-base-16384' )
# change to intended input here
snake_case_ = _long_tensor([5_12 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = _long_tensor([1_28 * [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69]] )
snake_case_ = prepare_led_inputs_dict(model.config , __UpperCamelCase , __UpperCamelCase )
snake_case_ = model(**__UpperCamelCase )[0]
snake_case_ = (1, 10_24, model.config.vocab_size)
self.assertEqual(output.shape , __UpperCamelCase )
# change to expected output here
snake_case_ = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1E-3 , rtol=1E-3 )
| 46 | 1 |
def A(__a: int , __a: int ):
return 1 if input_a == input_a else 0
def A():
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 122 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __magic_name__ (__lowercase , __lowercase , unittest.TestCase ):
lowerCamelCase__ = StableDiffusionXLImgaImgPipeline
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
lowerCamelCase__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
lowerCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self ) -> Any:
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , attention_head_dim=(2, 4) , use_linear_projection=_a , addition_embed_type="text_time" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
lowerCAmelCase_ = EulerDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule="scaled_linear" , timestep_spacing="leading" , )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=32 , )
lowerCAmelCase_ = CLIPTextModel(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowerCAmelCase_ = CLIPTextModelWithProjection(_a )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" , local_files_only=_a )
lowerCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_encoder_2": text_encoder_a,
"tokenizer_2": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def __a ( self , _a , _a=0 ) -> Dict:
lowerCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_a ) ).to(_a )
lowerCAmelCase_ = image / 2 + 0.5
if str(_a ).startswith("mps" ):
lowerCAmelCase_ = torch.manual_seed(_a )
else:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 5.0,
"output_type": "numpy",
"strength": 0.7_5,
}
return inputs
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionXLImgaImgPipeline(**_a )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = sd_pipe(**_a ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase_ = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __a ( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ) -> str:
pass
def __a ( self ) -> int:
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionXLImgaImgPipeline(**_a )
lowerCAmelCase_ = sd_pipe.to(_a )
lowerCAmelCase_ = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
# forward without prompt embeds
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = 3 * ["this is a negative prompt"]
lowerCAmelCase_ = negative_prompt
lowerCAmelCase_ = 3 * [inputs["prompt"]]
lowerCAmelCase_ = sd_pipe(**_a )
lowerCAmelCase_ = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
lowerCAmelCase_ = self.get_dummy_inputs(_a )
lowerCAmelCase_ = 3 * ["this is a negative prompt"]
lowerCAmelCase_ = 3 * [inputs.pop("prompt" )]
(
(
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) , (
lowerCAmelCase_
) ,
) = sd_pipe.encode_prompt(_a , negative_prompt=_a )
lowerCAmelCase_ = sd_pipe(
**_a , prompt_embeds=_a , negative_prompt_embeds=_a , pooled_prompt_embeds=_a , negative_pooled_prompt_embeds=_a , )
lowerCAmelCase_ = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __magic_name__ (unittest.TestCase ):
def __a ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self , _a , _a="cpu" , _a=torch.floataa , _a=0 ) -> Optional[int]:
lowerCAmelCase_ = torch.Generator(device=_a ).manual_seed(_a )
lowerCAmelCase_ = np.random.RandomState(_a ).standard_normal((1, 4, 64, 64) )
lowerCAmelCase_ = torch.from_numpy(_a ).to(device=_a , dtype=_a )
lowerCAmelCase_ = {
"prompt": "a photograph of an astronaut riding a horse",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Optional[int]:
lowerCAmelCase_ = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base" )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
lowerCAmelCase_ = self.get_inputs(_a )
lowerCAmelCase_ = pipe(**_a ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
lowerCAmelCase_ = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 122 | 1 |
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__SCREAMING_SNAKE_CASE : Tuple = Lock()
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(__lowerCAmelCase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
snake_case_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
snake_case_ = min(__lowerCAmelCase , __lowerCAmelCase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(__lowerCAmelCase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
snake_case_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
snake_case_ = max(__lowerCAmelCase , __lowerCAmelCase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(__lowerCAmelCase )
def _a ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
snake_case_ = []
snake_case_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
snake_case_ = Pipe()
snake_case_ = Pipe()
process_array_.append(
Process(
target=__lowerCAmelCase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
snake_case_ = temp_rs
snake_case_ = temp_rr
for i in range(1 , len(__lowerCAmelCase ) - 1 ):
snake_case_ = Pipe()
snake_case_ = Pipe()
process_array_.append(
Process(
target=__lowerCAmelCase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
snake_case_ = temp_rs
snake_case_ = temp_rr
process_array_.append(
Process(
target=__lowerCAmelCase , args=(
len(__lowerCAmelCase ) - 1,
arr[len(__lowerCAmelCase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(__lowerCAmelCase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(__lowerCAmelCase ) ):
snake_case_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def _a ( ) -> Tuple:
snake_case_ = list(range(10 , 0 , -1 ) )
print("""Initial List""" )
print(*__lowerCAmelCase )
snake_case_ = odd_even_transposition(__lowerCAmelCase )
print("""Sorted List\n""" )
print(*__lowerCAmelCase )
if __name__ == "__main__":
main()
| 717 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Optional[int] = {'vocab_file': 'spiece.model'}
__SCREAMING_SNAKE_CASE : List[str] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
__SCREAMING_SNAKE_CASE : List[str] = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
__SCREAMING_SNAKE_CASE : int = '▁'
class __A (snake_case__):
'''simple docstring'''
__lowercase: Optional[Any] = VOCAB_FILES_NAMES
__lowercase: Optional[int] = PRETRAINED_VOCAB_FILES_MAP
__lowercase: Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : List[Any]="[CLS]" , UpperCAmelCase_ : Any="[SEP]" , UpperCAmelCase_ : str="<unk>" , UpperCAmelCase_ : str="[SEP]" , UpperCAmelCase_ : Optional[Any]="<pad>" , UpperCAmelCase_ : Optional[int]="[CLS]" , UpperCAmelCase_ : int="[MASK]" , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Union[str, Any] , ) ->None:
"""simple docstring"""
snake_case_ = (
AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ , normalized=UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
else mask_token
)
snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
snake_case_ = do_lower_case
snake_case_ = remove_space
snake_case_ = keep_accents
snake_case_ = vocab_file
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase_ )
@property
def lowerCAmelCase ( self : List[Any] ) ->Dict:
"""simple docstring"""
return len(self.sp_model )
def lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) ->List[str]:
"""simple docstring"""
snake_case_ = self.__dict__.copy()
snake_case_ = None
return state
def __setstate__( self : Tuple , UpperCAmelCase_ : Optional[int] ) ->Optional[int]:
"""simple docstring"""
snake_case_ = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
snake_case_ = {}
snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Any ) ->str:
"""simple docstring"""
if self.remove_space:
snake_case_ = """ """.join(inputs.strip().split() )
else:
snake_case_ = inputs
snake_case_ = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
snake_case_ = unicodedata.normalize("""NFKD""" , UpperCAmelCase_ )
snake_case_ = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_ )] )
if self.do_lower_case:
snake_case_ = outputs.lower()
return outputs
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : str ) ->List[str]:
"""simple docstring"""
snake_case_ = self.preprocess_text(UpperCAmelCase_ )
snake_case_ = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ )
snake_case_ = []
for piece in pieces:
if len(UpperCAmelCase_ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
snake_case_ = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case_ = cur_pieces[1:]
else:
snake_case_ = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(UpperCAmelCase_ )
else:
new_pieces.append(UpperCAmelCase_ )
return new_pieces
def lowerCAmelCase ( self : Optional[int] , UpperCAmelCase_ : Optional[int] ) ->Dict:
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase_ )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict ) ->Any:
"""simple docstring"""
snake_case_ = []
snake_case_ = """"""
snake_case_ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase_ ) + token
snake_case_ = True
snake_case_ = []
else:
current_sub_tokens.append(UpperCAmelCase_ )
snake_case_ = False
out_string += self.sp_model.decode(UpperCAmelCase_ )
return out_string.strip()
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False ) ->List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is not None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None ) ->List[int]:
"""simple docstring"""
snake_case_ = [self.sep_token_id]
snake_case_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase ( self : str , save_directory : str , filename_prefix : Optional[str] = None ) ->Tuple[str]:
    """Save the SentencePiece vocabulary file into ``save_directory``.

    Copies the original model file when it exists, otherwise serializes the
    in-memory model. Logs an error and returns None when ``save_directory``
    is not an existing directory; otherwise returns a 1-tuple with the path
    of the written file.

    NOTE(review): the original signature duplicated one parameter name
    (a SyntaxError); restored ``save_directory``/``filename_prefix``.
    """
    if not os.path.isdir(save_directory):
        logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
    # Prefer copying the on-disk model file when it exists and is distinct
    # from the destination ...
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        # ... otherwise write the serialized in-memory model.
        with open(out_vocab_file, """wb""" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    return (out_vocab_file,)
| 2 | 0 |
from math import factorial, pi
def UpperCAmelCase__ ( theta : float , accuracy : int = 30 ):
    """Maclaurin-series approximation of sin(theta).

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms to sum; must be a positive int.

    Raises:
        ValueError: if ``theta`` is not numeric or ``accuracy`` is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''')
    # NOTE(review): previously the accuracy check was ``isinstance(accuracy,
    # accuracy)``, which raises TypeError instead of validating against int.
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''')
    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


# Module-level alias used by the demo block at the bottom of this file.
maclaurin_sin = UpperCAmelCase__
def UpperCAmelCase__ ( theta : float , accuracy : int = 30 ):
    """Maclaurin-series approximation of cos(theta).

    Args:
        theta: angle in radians (int or float).
        accuracy: number of series terms to sum; must be a positive int.

    Raises:
        ValueError: if ``theta`` is not numeric or ``accuracy`` is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''')
    # NOTE(review): previously ``isinstance(accuracy, accuracy)`` — a TypeError,
    # not a validation; the intended check is against int.
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''')
    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


# Module-level alias used by the demo block at the bottom of this file.
maclaurin_cos = UpperCAmelCase__
if __name__ == "__main__":
# Run the module doctests, then print demo values of the two series helpers.
import doctest
doctest.testmod()
# NOTE(review): these calls require module-level names ``maclaurin_sin`` and
# ``maclaurin_cos``; confirm the series helpers above are bound under those
# names (both are currently defined as ``UpperCAmelCase__``).
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 348 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __magic_name__ :
    """Helper that builds configs and dummy inputs for TimmBackbone tests.

    NOTE(review): the previous version repeated one parameter name for every
    ``__init__`` argument (a SyntaxError) and gave all four methods the same
    name even though their bodies call each other by descriptive names
    (``get_config``, ``prepare_config_and_inputs``); both restored below.
    """

    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=3_2,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        # Default to the last stage when no explicit indices are requested.
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values) for one model invocation."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        """Build a TimmBackboneConfig from the tester's settings."""
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        """Instantiate the backbone and check the final feature-map shape."""
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        # NOTE(review): attribute name ``feature_map`` kept from the original
        # (elsewhere the plural ``feature_maps`` is used) — confirm which one
        # the model output actually exposes.
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 1_4, 1_4),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict


# Backward-compatible alias: the test-suite below instantiates the tester
# under its descriptive name.
TimmBackboneModelTester = __magic_name__
@require_torch
@require_timm
class __magic_name__ ( snake_case, snake_case, snake_case, unittest.TestCase ):
    """Test-suite for TimmBackbone (model/pipeline/backbone common mixins).

    NOTE(review): obfuscation collapsed all class attributes and most locals
    onto shared names (only the last binding of ``_lowerCAmelCase`` survives)
    and the method signatures below repeat parameter names; the bodies are
    kept byte-identical and only annotated here. The final method appears
    truncated at the end of this chunk — confirm against the original file.
    """

# Class-level configuration flags consumed by the common-test mixins.
_lowerCAmelCase = (TimmBackbone,) if is_torch_available() else ()
_lowerCAmelCase = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
# setUp: create the model tester and a config tester (no text modality).
def _A ( self : Optional[Any] ):
lowerCAmelCase : Optional[int] = TimmBackboneModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
# Exercise the full ConfigTester battery for the backbone config.
def _A ( self : Optional[Any] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
# Check timm-backed and transformers-native backbones agree on their public
# surface (features, stage names, channels, out indices).
def _A ( self : int ):
lowerCAmelCase : Any = '''resnet18'''
lowerCAmelCase : Optional[Any] = '''microsoft/resnet-18'''
lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ )
lowerCAmelCase : Dict = AutoBackbone.from_pretrained(lowerCamelCase__ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
lowerCAmelCase : Optional[int] = AutoBackbone.from_pretrained(lowerCamelCase__ , use_timm_backbone=lowerCamelCase__ , out_indices=[1, 2, 3] )
lowerCAmelCase : Optional[int] = AutoBackbone.from_pretrained(lowerCamelCase__ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# The following methods deliberately disable common tests that do not apply
# to a timm-wrapped backbone (see each skip reason).
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _A ( self : List[str] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _A ( self : Optional[int] ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _A ( self : int ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _A ( self : str ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _A ( self : Dict ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _A ( self : List[Any] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self : List[str] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _A ( self : Optional[int] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _A ( self : List[Any] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self : int ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self : Tuple ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _A ( self : List[Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _A ( self : Tuple ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _A ( self : Tuple ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _A ( self : int ):
pass
# Verify every model class accepts ``pixel_values`` as its first forward arg.
def _A ( self : Union[str, Any] ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(lowerCamelCase__ )
lowerCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : Optional[int] = [*signature.parameters.keys()]
lowerCAmelCase : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
# Check that gradients flow back into hidden states (and attentions, when
# supported) after a backward pass through the first output.
def _A ( self : int ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase : Tuple = True
lowerCAmelCase : Any = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase : Dict = self.all_model_classes[0]
lowerCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
lowerCAmelCase : Any = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
lowerCAmelCase : int = model(**lowerCamelCase__ )
lowerCAmelCase : Optional[Any] = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase : Any = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
# Check feature-map/channel counts for explicit, default, and fresh-weight
# configurations. NOTE(review): body appears cut off at the chunk boundary.
def _A ( self : Tuple ):
lowerCAmelCase , lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase : Optional[int] = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase : str = copy.deepcopy(lowerCamelCase__ )
lowerCAmelCase : int = None
lowerCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase : Any = model(**lowerCamelCase__ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
lowerCAmelCase : Any = False
lowerCAmelCase : Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
lowerCAmelCase : List[Any] = model(**lowerCamelCase__ )
| 348 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
UpperCAmelCase__ = logging.get_logger(__name__)
class a ( lowerCAmelCase_ ):
    """Whisper-style log-mel feature extractor: pads/truncates raw audio to a
    fixed chunk length, computes a log-mel spectrogram, and optionally applies
    zero-mean/unit-variance normalization.

    NOTE(review): obfuscation collapsed every ``__init__`` parameter onto one
    name and reused one local name throughout; the code is kept byte-identical
    and only annotated here.
    """

_snake_case : Tuple = ['input_features']
def __init__( self : Union[str, Any] , __lowerCAmelCase : Dict=80 , __lowerCAmelCase : str=1_6000 , __lowerCAmelCase : int=160 , __lowerCAmelCase : Any=30 , __lowerCAmelCase : Dict=400 , __lowerCAmelCase : Optional[Any]=0.0 , __lowerCAmelCase : List[str]=False , **__lowerCAmelCase : str , ):
# Forward the padding/feature config to the sequence feature-extractor base.
super().__init__(
feature_size=__lowerCAmelCase , sampling_rate=__lowerCAmelCase , padding_value=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , **__lowerCAmelCase , )
# STFT / chunking parameters; n_samples is the fixed raw-audio window.
_UpperCAmelCase = n_fft
_UpperCAmelCase = hop_length
_UpperCAmelCase = chunk_length
_UpperCAmelCase = chunk_length * sampling_rate
_UpperCAmelCase = self.n_samples // hop_length
_UpperCAmelCase = sampling_rate
# Slaney-normalized mel filter bank covering 0..8 kHz.
_UpperCAmelCase = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__lowerCAmelCase , min_frequency=0.0 , max_frequency=8000.0 , sampling_rate=__lowerCAmelCase , norm="""slaney""" , mel_scale="""slaney""" , )
def lowerCAmelCase_ ( self : str , __lowerCAmelCase : np.array ):
"""Compute a clamped, scaled log10 mel spectrogram for one waveform."""
_UpperCAmelCase = spectrogram(
__lowerCAmelCase , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="""log10""" , )
# Drop the last frame, clamp 8 dB below the max, rescale to roughly [-1, 1].
_UpperCAmelCase = log_spec[:, :-1]
_UpperCAmelCase = np.maximum(__lowerCAmelCase , log_spec.max() - 8.0 )
_UpperCAmelCase = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowerCAmelCase_ ( __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : List[np.ndarray] , __lowerCAmelCase : float = 0.0 ):
"""Normalize each vector to zero mean / unit variance over its valid length."""
if attention_mask is not None:
_UpperCAmelCase = np.array(__lowerCAmelCase , np.intaa )
_UpperCAmelCase = []
for vector, length in zip(__lowerCAmelCase , attention_mask.sum(-1 ) ):
# Statistics computed only over the unpadded prefix of each vector.
_UpperCAmelCase = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
_UpperCAmelCase = padding_value
normed_input_values.append(__lowerCAmelCase )
else:
_UpperCAmelCase = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : str , __lowerCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[str] = "max_length" , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , **__lowerCAmelCase : Optional[int] , ):
"""Featurize raw mono audio into padded log-mel ``input_features``."""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
# Normalize the accepted input shapes (scalar list, single array, batch).
_UpperCAmelCase = isinstance(__lowerCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
_UpperCAmelCase = is_batched_numpy or (
isinstance(__lowerCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCAmelCase , np.ndarray ):
_UpperCAmelCase = np.asarray(__lowerCAmelCase , dtype=np.floataa )
elif isinstance(__lowerCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase = [np.asarray([raw_speech] ).T]
_UpperCAmelCase = BatchFeature({"""input_features""": raw_speech} )
# convert into correct format for padding
_UpperCAmelCase = self.pad(
__lowerCAmelCase , padding=__lowerCAmelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_UpperCAmelCase = self.zero_mean_unit_var_norm(
padded_inputs["""input_features"""] , attention_mask=padded_inputs["""attention_mask"""] , padding_value=self.padding_value , )
_UpperCAmelCase = np.stack(padded_inputs["""input_features"""] , axis=0 )
# make sure list is in array format
_UpperCAmelCase = padded_inputs.get("""input_features""" ).transpose(2 , 0 , 1 )
_UpperCAmelCase = [self._np_extract_fbank_features(__lowerCAmelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCAmelCase ):
_UpperCAmelCase = [np.asarray(__lowerCAmelCase , dtype=np.floataa ) for feature in input_features]
else:
_UpperCAmelCase = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
_UpperCAmelCase = padded_inputs["""attention_mask"""][:, :: self.hop_length]
if return_tensors is not None:
_UpperCAmelCase = padded_inputs.convert_to_tensors(__lowerCAmelCase )
return padded_inputs
def lowerCAmelCase_ ( self : Tuple ):
"""Serialize to a dict, dropping the (large, derivable) mel filter bank."""
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 717 | """simple docstring"""
import unittest
from knapsack import knapsack as k
class a ( unittest.TestCase ):
    """Tests for ``k.knapsack(capacity, weights, values, count)``.

    NOTE(review): the previous version reassigned one local name for all four
    arguments and passed an unbound name to ``knapsack``; it also used method
    names without the ``test_`` prefix, so unittest never discovered them.
    """

    def test_base_case(self):
        """Zero capacity (and trivial items) must yield value 0."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)
        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """Small instance with a known optimum of 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """Classic textbook instance with optimum 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
# Run the knapsack test-suite when this file is executed directly.
unittest.main()
| 275 | 0 |
from __future__ import annotations
from collections import Counter
from random import random
class __snake_case :
    """Directed graph with per-node outgoing transition probabilities, used as a
    Markov chain: ``transition(node)`` samples the next node.

    NOTE(review): the previous version named all four methods ``a`` (only the
    last survived) and assigned instance state to locals; restored the method
    names its in-module caller uses and real attribute assignments.
    """

    def __init__(self) -> None:
        # adjacency: node -> {destination: probability}
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register ``node`` with an empty outgoing-edge map."""
        self.connections[node] = {}

    def add_transition_probability(self, node_a: str, node_b: str, probability: float) -> None:
        """Add the edge ``node_a -> node_b`` with the given probability,
        creating either endpoint on first sight."""
        if node_a not in self.connections:
            self.add_node(node_a)
        if node_b not in self.connections:
            self.add_node(node_b)
        self.connections[node_a][node_b] = probability

    def get_nodes(self) -> list[str]:
        """Return all registered node names."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from ``node``'s outgoing distribution.

        Returns "" when the cumulative probability never exceeds the sample
        (e.g. the outgoing probabilities sum to less than the drawn value).
        """
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


# Backward-compatible alias: helper code in this module refers to the class
# by its descriptive name.
MarkovChainGraphUndirectedUnweighted = __snake_case
def lowerCamelCase__ ( start: str, transitions: list[tuple[str, str, float]], steps: int ):
    """Run a random walk of ``steps`` transitions from ``start`` and count visits.

    Returns a Counter mapping node -> visit count (every known node starts at
    1 because the Counter is seeded with the node list).

    NOTE(review): the previous version repeated one parameter name three times
    (a SyntaxError) and read ``visited``/``node`` before binding them.
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for node_a, node_b, probability in transitions:
        graph.add_transition_probability(node_a, node_b, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
# Execute the module doctests when run as a script.
import doctest
doctest.testmod()
| 268 |
from PIL import Image
def lowerCamelCase__ ( __A : "Image" ):
    """Binarize a greyscale PIL-style image in place around its mean intensity.

    Every pixel strictly above the mean becomes 255, all others become 0.
    Returns the (mutated) image.

    NOTE(review): the previous version lost the width/height unpack, read an
    unbound ``mean``, and ranged the loops over the image object itself.
    """
    height, width = __A.size
    mean = 0
    pixels = __A.load()
    # First pass: accumulate the mean intensity over all pixels.
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height
    # Second pass: threshold every pixel against the mean.
    for j in range(width):
        for i in range(height):
            pixels[i, j] = 2_5_5 if pixels[i, j] > mean else 0
    return __A


# Backward-compatible alias used by the demo block below.
mean_threshold = lowerCamelCase__
if __name__ == "__main__":
    # Demo: load an image as greyscale, binarize around its mean intensity,
    # and save the result.
    UpperCamelCase__ = mean_threshold(Image.open('''path_to_image''').convert('''L'''))
    # NOTE(review): previously saved through an undefined name ``image``.
    UpperCamelCase__.save('''output_image_path''')
| 268 | 1 |
'''simple docstring'''
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class a ( nn.Module ):
    """Basic transformer block: (ada-)norm + self-attention, optional
    cross-attention, and a feed-forward sub-block, each with residuals.

    NOTE(review): obfuscation repeated parameter names in ``__init__``/
    ``forward`` and reused one local name for distinct values (e.g. the
    ada-norm chunk unpack); the code is kept byte-identical and only
    annotated here — restore from the upstream source before running.
    """
def __init__( self : Tuple , snake_case : int , snake_case : int , snake_case : int , snake_case : Optional[int]=0.0 , snake_case : Optional[int] = None , snake_case : str = "geglu" , snake_case : Optional[int] = None , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = True , snake_case : str = "layer_norm" , snake_case : bool = False , ) -> str:
super().__init__()
__UpperCAmelCase : List[str] = only_cross_attention
# Which adaptive-norm flavour is active (requires num_embeds_ada_norm).
__UpperCAmelCase : Any = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__UpperCAmelCase : Dict = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to'
f' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.' )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__UpperCAmelCase : Dict = AdaLayerNorm(snake_case , snake_case )
elif self.use_ada_layer_norm_zero:
__UpperCAmelCase : Union[str, Any] = AdaLayerNormZero(snake_case , snake_case )
else:
__UpperCAmelCase : Optional[int] = nn.LayerNorm(snake_case , elementwise_affine=snake_case )
__UpperCAmelCase : Optional[int] = Attention(
query_dim=snake_case , heads=snake_case , dim_head=snake_case , dropout=snake_case , bias=snake_case , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__UpperCAmelCase : List[Any] = (
AdaLayerNorm(snake_case , snake_case )
if self.use_ada_layer_norm
else nn.LayerNorm(snake_case , elementwise_affine=snake_case )
)
__UpperCAmelCase : List[str] = Attention(
query_dim=snake_case , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case , dim_head=snake_case , dropout=snake_case , bias=snake_case , upcast_attention=snake_case , ) # is self-attn if encoder_hidden_states is none
else:
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : Any = None
# 3. Feed-forward
__UpperCAmelCase : Tuple = nn.LayerNorm(snake_case , elementwise_affine=snake_case )
__UpperCAmelCase : List[Any] = FeedForward(snake_case , dropout=snake_case , activation_fn=snake_case , final_dropout=snake_case )
# let chunk size default to None
__UpperCAmelCase : Union[str, Any] = None
__UpperCAmelCase : int = 0
def lowerCamelCase__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : int ) -> Optional[Any]:
"""Configure chunked feed-forward execution (chunk size and dim)."""
# Sets chunk feed-forward
__UpperCAmelCase : int = chunk_size
__UpperCAmelCase : int = dim
def lowerCamelCase__ ( self : str , snake_case : torch.FloatTensor , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.LongTensor] = None , snake_case : Dict[str, Any] = None , snake_case : Optional[torch.LongTensor] = None , ) -> Tuple:
"""Run norm + self-attention, optional cross-attention, then feed-forward,
each followed by a residual add (and ada-norm gating when enabled)."""
# Notice that normalization is always applied before the real computation in the following blocks.
# 1. Self-Attention
if self.use_ada_layer_norm:
__UpperCAmelCase : Tuple = self.norma(snake_case , snake_case )
elif self.use_ada_layer_norm_zero:
__UpperCAmelCase : Dict = self.norma(
snake_case , snake_case , snake_case , hidden_dtype=hidden_states.dtype )
else:
__UpperCAmelCase : Tuple = self.norma(snake_case )
__UpperCAmelCase : Union[str, Any] = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__UpperCAmelCase : Union[str, Any] = self.attna(
snake_case , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case , **snake_case , )
if self.use_ada_layer_norm_zero:
__UpperCAmelCase : Optional[int] = gate_msa.unsqueeze(1 ) * attn_output
__UpperCAmelCase : Any = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__UpperCAmelCase : Any = (
self.norma(snake_case , snake_case ) if self.use_ada_layer_norm else self.norma(snake_case )
)
__UpperCAmelCase : Any = self.attna(
snake_case , encoder_hidden_states=snake_case , attention_mask=snake_case , **snake_case , )
__UpperCAmelCase : Any = attn_output + hidden_states
# 3. Feed-forward
__UpperCAmelCase : int = self.norma(snake_case )
if self.use_ada_layer_norm_zero:
__UpperCAmelCase : Tuple = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.' )
__UpperCAmelCase : Tuple = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__UpperCAmelCase : int = torch.cat(
[self.ff(snake_case ) for hid_slice in norm_hidden_states.chunk(snake_case , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__UpperCAmelCase : Union[str, Any] = self.ff(snake_case )
if self.use_ada_layer_norm_zero:
__UpperCAmelCase : List[Any] = gate_mlp.unsqueeze(1 ) * ff_output
__UpperCAmelCase : Union[str, Any] = ff_output + hidden_states
return hidden_states
class a ( nn.Module ):
    """Feed-forward layer: project-in activation, dropout, project-out,
    with an optional final dropout.

    NOTE(review): the previous ``__init__`` repeated one parameter name for
    every argument (a SyntaxError) and ``forward`` fed the ORIGINAL input to
    each sub-module while returning an unbound name; both restored below.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ) -> None:
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        # Select the input projection / activation module.
        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate='''tanh''')
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def lowerCamelCase__(self, hidden_states):
        """Apply each sub-module of the network in sequence (chained)."""
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class a ( nn.Module ):
    """GELU activation preceded by a linear projection, with an optional tanh
    approximation and a float32 fallback on the mps backend.

    NOTE(review): the helper previously shared its name with the forward
    method, so ``self.gelu`` resolved to the wrong function; the ``__init__``
    also assigned nothing to ``self.proj``.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none") -> None:
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def lowerCamelCase__(self, hidden_states):
        """Project then apply (possibly approximate) GELU."""
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class a ( nn.Module ):
    """Gated GELU (GEGLU): project to ``2 * dim_out``, split in half, and gate
    one half with the GELU of the other.

    NOTE(review): the previous version gave helper and forward the same name
    and dropped the two-way chunk unpack, leaving ``hidden_states`` unbound.
    """

    def __init__(self, dim_in: int, dim_out: int) -> None:
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def lowerCamelCase__(self, hidden_states):
        """Project, split into (value, gate), return value * GELU(gate)."""
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class a ( nn.Module ):
    """Approximate GELU — ``x * sigmoid(1.702 * x)`` — after a linear
    projection.

    NOTE(review): the previous ``__init__`` assigned the projection to a local
    instead of ``self.proj`` (and duplicated its parameter names), and the
    forward multiplied an unbound ``x``.
    """

    def __init__(self, dim_in: int, dim_out: int) -> None:
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def lowerCamelCase__(self, hidden_states):
        """Project, then apply the sigmoid-based GELU approximation."""
        x = self.proj(hidden_states)
        return x * torch.sigmoid(1.702 * x)
class a ( nn.Module ):
    """Adaptive LayerNorm conditioned on a timestep embedding: the scale and
    shift applied after the (affine-free) norm are predicted from the
    embedded timestep.

    NOTE(review): the previous version duplicated parameter names and dropped
    the (scale, shift) unpack, leaving both unbound in the forward.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int) -> None:
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def lowerCamelCase__(self, x, timestep):
        """Normalize ``x`` and modulate it with the timestep-derived
        (scale, shift)."""
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class a ( nn.Module ):
    """Adaptive LayerNorm (zero-init flavour) conditioned on timestep and
    class label; returns the modulated activations plus the MSA/MLP gate,
    shift, and scale chunks.

    NOTE(review): the previous version duplicated parameter names and dropped
    the 6-way chunk unpack, leaving every modulation tensor unbound. The
    argument order of CombinedTimestepLabelEmbeddings is assumed to be
    (num_embeddings, embedding_dim) — confirm against the embeddings module.
    """

    def __init__(self, embedding_dim: int, num_embeddings: int) -> None:
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1E-6)

    def lowerCamelCase__(self, x, timestep, class_labels, hidden_dtype=None):
        """Embed (timestep, class), predict six modulation chunks, and apply
        the MSA shift/scale to the normalized ``x``."""
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class a ( nn.Module ):
    """GroupNorm whose per-channel scale and shift are predicted from a
    conditioning embedding (optionally passed through an activation first).

    NOTE(review): the previous version duplicated parameter names and dropped
    the (scale, shift) unpack, leaving both unbound in the forward.
    """

    def __init__(self, embedding_dim: int, out_dim: int, num_groups: int,
                 act_fn: Optional[str] = None, eps: float = 1E-5) -> None:
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)
        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def lowerCamelCase__(self, x, emb):
        """Group-normalize ``x`` and modulate it with the embedding-derived
        (scale, shift)."""
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        # broadcast over the spatial dims of x (N, C, H, W)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)
        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
| 705 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class a :
    """Nearest-neighbour image resizer.

    Fixes: every attribute was bound to a throwaway local instead of ``self``
    (so ``process`` crashed), all three methods shared one name (shadowing each
    other), ``np.uinta`` was a typo for ``np.uint8``, the getters read undefined
    names, and a zero destination size slipped past validation into a
    ZeroDivisionError.
    """

    def __init__(self, img, dst_width: int, dst_height: int):
        # <= 0, not < 0: a zero size would divide by zero when computing ratios.
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError('''Destination width/height should be > 0''')

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        # Source pixels stepped per destination pixel, per axis.
        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Destination buffer, initialised to white.
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self) -> None:
        """Fill ``self.output`` by sampling the nearest source pixel for each cell."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column to its source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row to its source row."""
        return int(self.ratio_y * y)


# Backward-compatible alias matching the name used by the __main__ script below.
NearestNeighbour = a
if __name__ == "__main__":
    # Demo: resize a sample image and display it. Fix: the results were assigned
    # to throwaway names while the code below read dst_w/dst_h/im/n (NameError).
    dst_w, dst_h = 8_0_0, 6_0_0
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
    )
    waitKey(0)
    destroyAllWindows()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps each submodule to the public symbols it exports.
# Fix: everything was assigned to `lowerCAmelCase_` while `_LazyModule` below
# received the undefined name `_import_structure` (NameError on import); the
# optional branches also *overwrote* the variable instead of adding dict keys,
# and the lazy module was bound to a variable instead of sys.modules[__name__].
_import_structure = {
    'configuration_roberta': ['ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RobertaConfig', 'RobertaOnnxConfig'],
    'tokenization_roberta': ['RobertaTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_roberta_fast'] = ['RobertaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_roberta'] = [
        'ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RobertaForCausalLM',
        'RobertaForMaskedLM',
        'RobertaForMultipleChoice',
        'RobertaForQuestionAnswering',
        'RobertaForSequenceClassification',
        'RobertaForTokenClassification',
        'RobertaModel',
        'RobertaPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_roberta'] = [
        'TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRobertaForCausalLM',
        'TFRobertaForMaskedLM',
        'TFRobertaForMultipleChoice',
        'TFRobertaForQuestionAnswering',
        'TFRobertaForSequenceClassification',
        'TFRobertaForTokenClassification',
        'TFRobertaMainLayer',
        'TFRobertaModel',
        'TFRobertaPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_roberta'] = [
        'FlaxRobertaForCausalLM',
        'FlaxRobertaForMaskedLM',
        'FlaxRobertaForMultipleChoice',
        'FlaxRobertaForQuestionAnswering',
        'FlaxRobertaForSequenceClassification',
        'FlaxRobertaForTokenClassification',
        'FlaxRobertaModel',
        'FlaxRobertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports, visible only to type checkers.
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
lowerCAmelCase_ = TypeVar('T')
class _A ( Generic[T] ):
def __init__( self : Optional[Any] , _A : T ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = data
lowercase : Node[T] | None = None
def __str__( self : List[str] ) -> str:
"""simple docstring"""
return f"""{self.data}"""
class _A ( Generic[T] ):
def __init__( self : str ) -> None:
"""simple docstring"""
lowercase : Node[T] | None = None
def __iter__( self : List[str] ) -> Iterator[T]:
"""simple docstring"""
lowercase : int = self.top
while node:
yield node.data
lowercase : List[str] = node.next
def __str__( self : Tuple ) -> str:
"""simple docstring"""
return "->".join([str(_A ) for item in self] )
def __len__( self : Tuple ) -> int:
"""simple docstring"""
return len(tuple(iter(self ) ) )
def __a ( self : List[str] ) -> bool:
"""simple docstring"""
return self.top is None
def __a ( self : List[Any] , _A : T ) -> None:
"""simple docstring"""
lowercase : Any = Node(_A )
if not self.is_empty():
lowercase : str = self.top
lowercase : Any = node
def __a ( self : List[Any] ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('''pop from empty stack''' )
assert isinstance(self.top , _A )
lowercase : int = self.top
lowercase : Optional[int] = self.top.next
return pop_node.data
def __a ( self : Optional[Any] ) -> T:
"""simple docstring"""
if self.is_empty():
raise IndexError('''peek from empty stack''' )
assert self.top is not None
return self.top.data
def __a ( self : Dict ) -> None:
"""simple docstring"""
lowercase : str = None
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2000000) -> int:
    """Project Euler 85: return the area of the grid whose number of contained
    rectangles is closest to ``target``.

    A w x h grid contains T(w) * T(h) rectangles, where T is the triangle
    number function; we enumerate one side and solve for the other.

    Fixes: the parameter was renamed away from ``target`` while the body still
    read ``target`` (NameError), every intermediate result was bound to a
    throwaway name, and the __main__ guard calls ``solution``.
    """
    triangle_numbers: list[int] = [0]
    idx: int
    # Precompute triangle numbers a little past sqrt(2 * target).
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area


# Backward-compatible alias for the original (obfuscated) name.
lowercase_ = solution
if __name__ == "__main__":
    # Print the grid area whose rectangle count is closest to two million.
    print(f'{solution() = }')
| 5 |
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are never auto-closed by the stale bot.
# Fix: the list was assigned to `A` while main() reads `LABELS_TO_EXEMPT`.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]
def main():
    """Sweep open issues in huggingface/diffusers and apply the stale policy.

    Fixes: every intermediate value (client, repo, issue list, comments) was
    bound to a throwaway local and then read under its real name (NameError);
    the sort key lambda's parameter did not match its body; and the __main__
    guard calls ``main`` while the def had been renamed.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored.")
            issue.add_to_labels("stale")


# Backward-compatible alias for the original (obfuscated) name.
lowercase_ = main
if __name__ == "__main__":
    # Entry point: run the stale-issue sweep when executed as a script.
    main()
| 5 | 1 |
def solution(limit: int = 50000000) -> int:
    '''Project Euler 87: count the numbers below ``limit`` expressible as
    p1**2 + p2**3 + p3**4 with p1, p2, p3 prime.

    Fixes: every binding (``ret``, ``primes``, ``square``, ...) was assigned to
    a throwaway name and then read under its real name (NameError); the nested
    loops also ``break`` on a size test, which is only sound over a *sorted*
    prime sequence, so we iterate sorted(primes).
    '''
    ret = set()
    # Largest prime whose square can appear: square <= limit - 2**3 - 2**4.
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers, then add 2.
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    sorted_primes = sorted(primes)
    for prime_1 in sorted_primes:
        square = prime_1 * prime_1
        for prime_2 in sorted_primes:
            cube = prime_2 * prime_2 * prime_2
            # 16 == 2**4 is the smallest possible fourth-power term.
            if square + cube >= limit - 16:
                break
            for prime_3 in sorted_primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)


# Backward-compatible alias for the original (obfuscated) name.
__lowerCamelCase = solution
if __name__ == "__main__":
    # Print the count of prime-power triples below fifty million.
    print(F'''{solution() = }''')
| 362 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure: maps each submodule to the public symbols it exports.
# Fix: it was assigned to `_UpperCAmelCase` while `_LazyModule` below received
# the undefined `_import_structure` (NameError); the torch branch overwrote the
# variable instead of adding a dict key; the TYPE_CHECKING imports referenced
# degraded module/class names while the dict keys name the real ones; and the
# lazy module was bound to a variable instead of sys.modules[__name__].
_import_structure = {
    """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
    """tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_m2m_100"""] = [
        """M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """M2M100ForConditionalGeneration""",
        """M2M100Model""",
        """M2M100PreTrainedModel""",
    ]


if TYPE_CHECKING:
    # Real imports, visible only to type checkers.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 362 | 1 |
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()

# Registry: model-type key -> (config class, TF class(es), PT class(es), AWS map(s)).
# Fix: the dict was assigned to `snake_case` while both conversion functions
# below read `MODEL_CLASSES` (NameError).
MODEL_CLASSES = {
    'bart': (
        BartConfig,
        TFBartForConditionalGeneration,
        TFBartForSequenceClassification,
        BartForConditionalGeneration,
        BART_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    'bert': (
        BertConfig,
        TFBertForPreTraining,
        BertForPreTraining,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'bert-large-uncased-whole-word-masking-finetuned-squad': (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'bert-large-cased-whole-word-masking-finetuned-squad': (
        BertConfig,
        TFBertForQuestionAnswering,
        BertForQuestionAnswering,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'bert-base-cased-finetuned-mrpc': (
        BertConfig,
        TFBertForSequenceClassification,
        BertForSequenceClassification,
        BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'dpr': (
        DPRConfig,
        TFDPRQuestionEncoder,
        TFDPRContextEncoder,
        TFDPRReader,
        DPRQuestionEncoder,
        DPRContextEncoder,
        DPRReader,
        DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
        DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    'gpt2': (
        GPTaConfig,
        TFGPTaLMHeadModel,
        GPTaLMHeadModel,
        GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'xlnet': (
        XLNetConfig,
        TFXLNetLMHeadModel,
        XLNetLMHeadModel,
        XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'xlm': (
        XLMConfig,
        TFXLMWithLMHeadModel,
        XLMWithLMHeadModel,
        XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'xlm-roberta': (
        XLMRobertaConfig,
        TFXLMRobertaForMaskedLM,
        XLMRobertaForMaskedLM,
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'transfo-xl': (
        TransfoXLConfig,
        TFTransfoXLLMHeadModel,
        TransfoXLLMHeadModel,
        TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'openai-gpt': (
        OpenAIGPTConfig,
        TFOpenAIGPTLMHeadModel,
        OpenAIGPTLMHeadModel,
        OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'roberta': (
        RobertaConfig,
        TFRobertaForCausalLM,
        TFRobertaForMaskedLM,
        RobertaForMaskedLM,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'layoutlm': (
        LayoutLMConfig,
        TFLayoutLMForMaskedLM,
        LayoutLMForMaskedLM,
        LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
    ),
    'roberta-large-mnli': (
        RobertaConfig,
        TFRobertaForSequenceClassification,
        RobertaForSequenceClassification,
        ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'camembert': (
        CamembertConfig,
        TFCamembertForMaskedLM,
        CamembertForMaskedLM,
        CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'flaubert': (
        FlaubertConfig,
        TFFlaubertWithLMHeadModel,
        FlaubertWithLMHeadModel,
        FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'distilbert': (
        DistilBertConfig,
        TFDistilBertForMaskedLM,
        DistilBertForMaskedLM,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'distilbert-base-distilled-squad': (
        DistilBertConfig,
        TFDistilBertForQuestionAnswering,
        DistilBertForQuestionAnswering,
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'lxmert': (
        LxmertConfig,
        TFLxmertForPreTraining,
        LxmertForPreTraining,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'lxmert-visual-feature-encoder': (
        LxmertConfig,
        TFLxmertVisualFeatureEncoder,
        LxmertVisualFeatureEncoder,
        LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'ctrl': (
        CTRLConfig,
        TFCTRLLMHeadModel,
        CTRLLMHeadModel,
        CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'albert': (
        AlbertConfig,
        TFAlbertForPreTraining,
        AlbertForPreTraining,
        ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    't5': (
        TaConfig,
        TFTaForConditionalGeneration,
        TaForConditionalGeneration,
        T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'electra': (
        ElectraConfig,
        TFElectraForPreTraining,
        ElectraForPreTraining,
        ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
    'wav2vec2': (
        WavaVecaConfig,
        TFWavaVecaModel,
        WavaVecaModel,
        WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    ),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
) -> None:
    """Convert one PyTorch checkpoint to a TF2 ``.h5`` weights file.

    Fixes: all six parameters shared the name ``__lowercase`` (a SyntaxError)
    and every local binding was lost; the caller below passes these keywords
    (model_type=, pytorch_checkpoint_path=, config_file=, tf_dump_path=,
    compare_with_pt_model=), which fixes the parameter names.
    """
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.')

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        # NOTE(review): second cached_file argument assumed to be CONFIG_NAME — confirm.
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models)
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict)

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'Max absolute difference between models outputs {diff}')
        assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}'

    # Save pytorch-model
    print(f'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
) -> None:
    """Convert every (or one selected) model type's checkpoints to TF2.

    Fixes: all eight parameters shared the name ``__lowercase`` (a SyntaxError)
    and all local bindings were lost; parameter names are fixed by the keyword
    call in the __main__ block below.
    """
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 1_0_0)
        print(f' Converting model type {j}/{len(model_types)}: {model_type}')
        print("=" * 1_0_0)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.')

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
            print("-" * 1_0_0)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f'    Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f'    Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            print(
                f'    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}')
            print("-" * 1_0_0)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"), compare_with_pt_model=compare_with_pt_model, )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    # Fix: the parser and parsed args were assigned to `snake_case` while the
    # code below reads `parser` / `args` (NameError).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
    )
    parser.add_argument(
        '--model_type',
        default=None,
        type=str,
        help=(
            f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
            'convert all the models from AWS.'
        ),
    )
    parser.add_argument(
        '--pytorch_checkpoint_path',
        default=None,
        type=str,
        help=(
            'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
            'If not given, will download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--config_file',
        default=None,
        type=str,
        help=(
            'The config json file corresponding to the pre-trained model. \n'
            'This specifies the model architecture. If not given and '
            '--pytorch_checkpoint_path is not given or is a shortcut name '
            'use the configuration associated to the shortcut name on the AWS'
        ),
    )
    parser.add_argument(
        '--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
    )
    parser.add_argument(
        '--use_cached_models',
        action='store_true',
        help='Use cached models if possible instead of updating to latest checkpoint versions.',
    )
    parser.add_argument(
        '--remove_cached_files',
        action='store_true',
        help='Remove pytorch models after conversion (save memory when converting in batches).',
    )
    parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()

    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if ``number`` is a perfect square.

    Fix: the parameter was named ``__lowercase`` while the body read ``number``
    (NameError); the helper is called as ``is_sq`` by the solver below.
    """
    sq = int(number**0.5)
    return number == sq * sq
def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Return x + y + z as a reduced fraction (numerator, denominator).

    Fix: all six parameters shared the name ``__lowercase`` (a SyntaxError);
    the solver below calls this helper as ``add_three``.
    """
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution(order: int = 3_5) -> int:
    """Enumerate reduced fractions x < y < z <= 1 with x, y, z having
    numerator/denominator at most ``order`` and x^n + y^n = z^n for
    n in {-2, -1, 1, 2}; return numerator + denominator of the sum of all
    distinct (x + y + z) values.

    Fixes: every intermediate binding (``unique_s``, ``total``, ``z_num``,
    ``z_den``, ``hcf``, ...) was assigned to a throwaway name and read under
    its real name (NameError); the __main__ guard calls ``solution``.
    """
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


# Backward-compatible alias for the original (obfuscated) name.
snake_case__ = solution
if __name__ == "__main__":
    # Print numerator + denominator of the total sum for the default order (35).
    print(f"""{solution() = }""")
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


# ControlNet pipelines need both transformers and torch; fall back to dummy
# placeholder objects (which raise helpful errors on use) when either is missing.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


# The Flax pipeline additionally requires JAX/Flax to be installed.
if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class snake_case_ ( IterableDataset ):
    """Iterable dataset that yields 0, 1, 2, ... and stops randomly with
    probability ``p_stop`` after each item, capped at ``max_length`` items.

    Fixes: the base class was an undefined name (should be ``IterableDataset``,
    imported above), and ``p_stop`` / ``max_length`` / the loop state were
    bound to throwaway locals instead of real variables.
    """

    def __init__(self, p_stop=0.01, max_length=1_000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            # Decide *after* yielding, so at least one item is always produced.
            stop = random.random() < self.p_stop
class snake_case_ ( unittest.TestCase ):
    """Tests for accelerate's dataloader sharding helpers (BatchSamplerShard,
    IterableDatasetShard, SkipBatchSampler, SkipDataLoader, skip_first_batches,
    DataLoaderShard, DataLoaderDispatcher).

    NOTE(review): this block is obfuscation-corrupted and does not run as-is:
    - every method is named ``__A``, so only the last definition survives on
      the class;
    - several signatures repeat the parameter name ``__lowerCAmelCase``, which
      is a SyntaxError in Python;
    - bodies assign results to the throw-away name ``SCREAMING_SNAKE_CASE_``
      but then read the original identifiers (``batch_sampler_shards``,
      ``expected``, ``dataloader``, ...), which are never bound.
    The comments below describe the evident intent of each method; confirm
    against the upstream accelerate test suite before relying on them.
    """
    # Helper: shard a batch sampler across 2 processes and compare each
    # shard's batches against ``expected`` (presumably the 2nd positional arg).
    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ):
        SCREAMING_SNAKE_CASE_ : int = [
            BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
            for i in range(2 )
        ]
        SCREAMING_SNAKE_CASE_ : Tuple = [list(__lowerCAmelCase ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Without split_batches each shard must report the expected length.
            self.assertListEqual([len(__lowerCAmelCase ) for shard in batch_sampler_shards] , [len(__lowerCAmelCase ) for e in expected] )
        self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
    # Default sharding (split_batches=False, even_batches=True): short shards
    # are padded by cycling from the beginning of the dataset.
    def __A ( self ):
        # Check the shards when the dataset is a round multiple of total batch size.
        SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : str = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        # Check the shards when the dataset is very small.
        SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : int = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase )
    # split_batches=True: each process receives half of every batch.
    def __A ( self ):
        # Check the shards when the dataset is a round multiple of batch size.
        SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : int = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : int = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase )
    # even_batches=False: trailing shards may be shorter instead of padded.
    def __A ( self ):
        # Check the shards when the dataset is a round multiple of total batch size.
        SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        SCREAMING_SNAKE_CASE_ : List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] = [[[0, 1]], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(2 ) , batch_size=3 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , even_batches=__lowerCAmelCase )
    # split_batches=True together with even_batches=False.
    def __A ( self ):
        # Check the shards when the dataset is a round multiple of batch size.
        SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        # Expected shouldn't change
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size.
        SCREAMING_SNAKE_CASE_ : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        SCREAMING_SNAKE_CASE_ : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Tuple = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        # Check the shards when the dataset is very small.
        SCREAMING_SNAKE_CASE_ : Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Optional[Any] = [[[0, 1]], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[Any] = [[], []]
        self.check_batch_sampler_shards(__lowerCAmelCase , __lowerCAmelCase , split_batches=__lowerCAmelCase , even_batches=__lowerCAmelCase )
    # Sharding a plain list of uneven batches (no BatchSampler wrapper).
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = [BatchSamplerShard(__lowerCAmelCase , 2 , __lowerCAmelCase , even_batches=__lowerCAmelCase ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
    # Helper: shard an iterable dataset and check every element of the
    # reference appears exactly once across shards, in batch order.
    def __A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=2 , __lowerCAmelCase=False ):
        random.seed(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Any = list(__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : Dict = [
            IterableDatasetShard(
                __lowerCAmelCase , batch_size=__lowerCAmelCase , drop_last=__lowerCAmelCase , num_processes=__lowerCAmelCase , process_index=__lowerCAmelCase , split_batches=__lowerCAmelCase , )
            for i in range(__lowerCAmelCase )
        ]
        SCREAMING_SNAKE_CASE_ : List[Any] = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(__lowerCAmelCase )
            iterable_dataset_lists.append(list(__lowerCAmelCase ) )
        SCREAMING_SNAKE_CASE_ : Dict = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        SCREAMING_SNAKE_CASE_ : Dict = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(__lowerCAmelCase ) , len(__lowerCAmelCase ) )
        self.assertTrue(len(__lowerCAmelCase ) % shard_batch_size == 0 )
        SCREAMING_SNAKE_CASE_ : Dict = []
        for idx in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            # Without drop_last, shards pad by looping back over the dataset.
            while len(__lowerCAmelCase ) < len(__lowerCAmelCase ):
                reference += reference
        self.assertListEqual(__lowerCAmelCase , reference[: len(__lowerCAmelCase )] )
    # Exercise iterable-dataset sharding over all drop_last/split_batches
    # combinations, including a dataset smaller than one batch.
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : str = 42
        SCREAMING_SNAKE_CASE_ : List[str] = RandomIterableDataset()
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        # Edge case with a very small dataset
        SCREAMING_SNAKE_CASE_ : List[str] = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
        self.check_iterable_dataset_shards(__lowerCAmelCase , __lowerCAmelCase , batch_size=4 , drop_last=__lowerCAmelCase , split_batches=__lowerCAmelCase )
    # SkipBatchSampler drops the first `n` batches.
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Optional[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=__lowerCAmelCase )
        SCREAMING_SNAKE_CASE_ : List[str] = SkipBatchSampler(__lowerCAmelCase , 2 )
        self.assertListEqual(list(__lowerCAmelCase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    # SkipDataLoader drops the first `skip_batches` batches.
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Dict = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    # skip_first_batches wraps an existing DataLoader.
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
        SCREAMING_SNAKE_CASE_ : Tuple = skip_first_batches(__lowerCAmelCase , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
    # DataLoaderShard flags end_of_dataloader exactly on the last batch.
    def __A ( self ):
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
    # DataLoaderDispatcher needs an Accelerator state to be initialized first.
    def __A ( self ):
        Accelerator()
        SCREAMING_SNAKE_CASE_ : Any = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(__lowerCAmelCase ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# ---- corpus snippet boundary (separator 345 | 1) ----
def lowerCAmelCase( __lowerCamelCase ):
    """Convert a binary string to its octal representation.

    Args:
        __lowerCamelCase: string of '0'/'1' characters (no prefix).

    Returns:
        The octal digits as a string, e.g. '111100' -> '74'.

    Raises:
        ValueError: if the string is empty or contains non-binary characters.

    Fixes over the obfuscated original: the body read ``bin_string``,
    ``oct_val`` and ``oct_string`` which were never bound (every assignment
    went to the throw-away name ``__a``), so the function raised NameError.
    """
    bin_string = __lowerCamelCase
    if not all(char in '01' for char in bin_string ):
        raise ValueError('Non-binary value was passed to the function' )
    if not bin_string:
        raise ValueError('Empty string was passed to the function' )
    oct_string = ''
    # Left-pad with zeros so the length is a multiple of 3 (one octal digit
    # per group of 3 bits).
    while len(bin_string ) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string ) )
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        # Weight bits by 4, 2, 1 within the group.
        for index, val in enumerate(bin_group ):
            oct_val += int(2 ** (2 - index) * int(val ) )
        oct_string += str(oct_val )
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
# ---- corpus snippet boundary (separator id 707) ----
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class a__ ( unittest.TestCase ):
    """Model tester producing small FlaxBigBird configs and inputs.

    NOTE(review): obfuscation-corrupted — ``__init__`` repeats the parameter
    name ``UpperCAmelCase`` (a SyntaxError), and every assignment target is
    the throw-away name ``__a`` while later lines read the intended names
    (``config``, ``input_ids``, ``config_and_inputs``, ...), which are never
    bound. Presumably this mirrors ``FlaxBigBirdModelTester`` upstream —
    confirm against the transformers test suite.
    """
    def __init__( self , UpperCAmelCase , UpperCAmelCase=2 , UpperCAmelCase=5_6 , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=True , UpperCAmelCase=9_9 , UpperCAmelCase=3_2 , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=7 , UpperCAmelCase="gelu_new" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=5_1_2 , UpperCAmelCase=1_6 , UpperCAmelCase=2 , UpperCAmelCase=0.02 , UpperCAmelCase=4 , UpperCAmelCase="block_sparse" , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase=2 , UpperCAmelCase=3 , ) -> Optional[Any]:
        # Stores the test hyper-parameters (batch size, sequence length,
        # hidden sizes, attention type, ...) for later config construction.
        __a = parent
        __a = batch_size
        __a = seq_length
        __a = is_training
        __a = use_attention_mask
        __a = use_token_type_ids
        __a = use_labels
        __a = vocab_size
        __a = hidden_size
        __a = num_hidden_layers
        __a = num_attention_heads
        __a = intermediate_size
        __a = hidden_act
        __a = hidden_dropout_prob
        __a = attention_probs_dropout_prob
        __a = max_position_embeddings
        __a = type_vocab_size
        __a = type_sequence_label_size
        __a = initializer_range
        __a = num_choices
        __a = rescale_embeddings
        __a = attention_type
        __a = use_bias
        __a = block_size
        __a = num_random_blocks
    # Build (config, input_ids, token_type_ids, attention_mask) for a test.
    def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
        __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __a = None
        if self.use_attention_mask:
            __a = random_attention_mask([self.batch_size, self.seq_length] )
        __a = None
        if self.use_token_type_ids:
            __a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __a = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    # Same as above but packaged as (config, inputs_dict) for common tests.
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        __a = self.prepare_config_and_inputs()
        __a , __a , __a , __a = config_and_inputs
        __a = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class a__ ( __snake_case , unittest.TestCase ):
    """Flax BigBird model tests (mixin-based common test suite).

    NOTE(review): obfuscation-corrupted — assignments go to the throw-away
    name ``__a`` while later code reads the intended names; also shadows the
    class name ``a__`` used earlier in this file. Class attributes ``A__``
    are repeated, so only the last binding of each duplicated name survives.
    """
    # Tuple of all Flax BigBird model classes exercised by the common tests.
    A__ : Dict = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    A__ : int = False
    A__ : int = False
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
        # setUp: build the model tester used by the mixin's common tests.
        __a = FlaxBigBirdModelTester(self )
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
        super().test_from_pretrained_save_pretrained()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
        super().test_from_pretrained_with_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        super().test_no_automatic_init()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        super().test_hidden_states_output()
    @slow
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        # Smoke-test loading the pretrained checkpoint for every class.
        for model_class_name in self.all_model_classes:
            __a = model_class_name.from_pretrained('google/bigbird-roberta-base' )
            self.assertIsNotNone(UpperCAmelCase )
    def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
        # Attention outputs are only checked when the flag allows it.
        if self.test_attn_probs:
            super().test_attention_outputs()
    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
        # Verify jitted and non-jitted forward passes produce same shapes.
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                __a = self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
                __a = model_class(UpperCAmelCase )
                @jax.jit
                def model_jitted(UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ):
                    return model(input_ids=UpperCAmelCase , attention_mask=UpperCAmelCase , **UpperCAmelCase )
                with self.subTest('JIT Enabled' ):
                    __a = model_jitted(**UpperCAmelCase ).to_tuple()
                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        __a = model_jitted(**UpperCAmelCase ).to_tuple()
                    self.assertEqual(len(UpperCAmelCase ) , len(UpperCAmelCase ) )
                    for jitted_output, output in zip(UpperCAmelCase , UpperCAmelCase ):
                        self.assertEqual(jitted_output.shape , output.shape )
    def __SCREAMING_SNAKE_CASE ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1e-5 , UpperCAmelCase="outputs" , UpperCAmelCase=None ) -> Dict:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# ---- corpus snippet boundary (separator 246 | 0) ----
'''simple docstring'''


def UpperCAmelCase_( ):
    """Project Euler 40: Champernowne's constant.

    Concatenate the positive integers ("123456789101112...") and return the
    product of the digits at (1-based) positions 1, 10, 100, ..., 1_000_000.

    Fixes over the obfuscated original: the body read ``constant`` and ``i``
    which were never bound (all assignments went to the throw-away name
    ``_a``), and the loop condition measured an undefined name.
    """
    constant = []
    i = 1
    # Appending 1e6 number-strings yields well over 1e6 digits once joined,
    # which is enough to index position 999_999.
    while len(constant ) < 1E6:
        constant.append(str(i ) )
        i += 1
    constant = "".join(constant )
    return (
        int(constant[0] )
        * int(constant[9] )
        * int(constant[9_9] )
        * int(constant[9_9_9] )
        * int(constant[9_9_9_9] )
        * int(constant[9_9_9_9_9] )
        * int(constant[9_9_9_9_9_9] )
    )


if __name__ == "__main__":
    # The original guard called an undefined `solution()`; call the real
    # entry point instead.
    print(UpperCAmelCase_())
# ---- corpus snippet boundary (separator id 120) ----
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of 1..n in lexicographic order.

    Fixes over the obfuscated original: all three functions shared the name
    ``__lowerCamelCase`` with duplicated parameter names (a SyntaxError), yet
    their bodies and the ``__main__`` guard called the real names restored
    here (``create_all_state``, ``print_all_state``, ``result``, ...).
    """
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Recursive backtracking helper.

    Extends ``current_list`` with values from ``increment`` upward; when
    ``level`` reaches 0 a finished combination is copied into ``total_list``.
    """
    if level == 0:
        # Copy: current_list keeps being mutated by the backtracking below.
        total_list.append(current_list[:])
        return
    # Upper bound leaves room for the remaining `level - 1` picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line, space-separated."""
    for combination in total_list:
        print(*combination)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
# ---- corpus snippet boundary (separator 496 | 0) ----
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Any=1_3 , UpperCamelCase : Union[str, Any]=7 , UpperCamelCase : Dict=True , UpperCamelCase : Any=True , UpperCamelCase : List[str]=True , UpperCamelCase : Dict=True , UpperCamelCase : Tuple=9_9 , UpperCamelCase : Dict=1_6 , UpperCamelCase : Any=3_6 , UpperCamelCase : int=6 , UpperCamelCase : Optional[int]=6 , UpperCamelCase : str=6 , UpperCamelCase : Dict=3_7 , UpperCamelCase : Tuple="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : Tuple=5_1_2 , UpperCamelCase : Dict=1_6 , UpperCamelCase : Any=2 , UpperCamelCase : Union[str, Any]=0.0_2 , UpperCamelCase : Tuple=3 , UpperCamelCase : Any=4 , UpperCamelCase : List[str]=None , )->int:
__SCREAMING_SNAKE_CASE : str = parent
__SCREAMING_SNAKE_CASE : Tuple = batch_size
__SCREAMING_SNAKE_CASE : Optional[int] = seq_length
__SCREAMING_SNAKE_CASE : str = is_training
__SCREAMING_SNAKE_CASE : int = use_input_mask
__SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids
__SCREAMING_SNAKE_CASE : Tuple = use_labels
__SCREAMING_SNAKE_CASE : Tuple = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = embedding_size
__SCREAMING_SNAKE_CASE : Dict = hidden_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
__SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_groups
__SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
__SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act
__SCREAMING_SNAKE_CASE : str = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Tuple = max_position_embeddings
__SCREAMING_SNAKE_CASE : Tuple = type_vocab_size
__SCREAMING_SNAKE_CASE : Tuple = type_sequence_label_size
__SCREAMING_SNAKE_CASE : List[Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = num_labels
__SCREAMING_SNAKE_CASE : List[Any] = num_choices
__SCREAMING_SNAKE_CASE : str = scope
def __snake_case ( self : int )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : List[Any] = None
__SCREAMING_SNAKE_CASE : List[str] = None
__SCREAMING_SNAKE_CASE : List[str] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __snake_case ( self : str )->int:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __snake_case ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : int )->List[Any]:
__SCREAMING_SNAKE_CASE : Optional[int] = AlbertModel(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )
__SCREAMING_SNAKE_CASE : List[Any] = model(UpperCamelCase , token_type_ids=UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __snake_case ( self : Any , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple )->int:
__SCREAMING_SNAKE_CASE : str = AlbertForPreTraining(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , sentence_order_label=UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __snake_case ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] )->List[str]:
__SCREAMING_SNAKE_CASE : Any = AlbertForMaskedLM(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[int] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : Dict , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Tuple , UpperCamelCase : List[str] )->Tuple:
__SCREAMING_SNAKE_CASE : str = AlbertForQuestionAnswering(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : int = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] )->Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Tuple = AlbertForSequenceClassification(UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Any = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __snake_case ( self : int , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : str )->Dict:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : Any = AlbertForTokenClassification(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : Optional[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Dict )->Any:
__SCREAMING_SNAKE_CASE : Any = self.num_choices
__SCREAMING_SNAKE_CASE : Tuple = AlbertForMultipleChoice(config=UpperCamelCase )
model.to(UpperCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__SCREAMING_SNAKE_CASE : int = model(
UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : List[str] )->Optional[Any]:
__SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
), (
__SCREAMING_SNAKE_CASE
), (
__SCREAMING_SNAKE_CASE
), (
__SCREAMING_SNAKE_CASE
), (
__SCREAMING_SNAKE_CASE
), (
__SCREAMING_SNAKE_CASE
), (
__SCREAMING_SNAKE_CASE
),
) : Optional[Any] = config_and_inputs
__SCREAMING_SNAKE_CASE : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline tests for ALBERT.

    Restored from obfuscation: the three class attributes were all named
    `lowerCAmelCase` (each shadowing the previous), and every test method was
    named `__snake_case`, so unittest would discover none of them. The mixin
    base classes are assumed to be ModelTesterMixin / PipelineTesterMixin as in
    the upstream ALBERT test file -- TODO confirm against the imports.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True  # NOTE(review): the original boolean attribute's real name is not recoverable here -- confirm upstream

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Add dummy labels for pretraining heads when the common tests request them."""
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # NOTE(review): the mapping checked here was obfuscated; upstream uses
            # MODEL_FOR_PRETRAINING_MAPPING -- confirm against the imports.
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the released albert-base-v2 weights.

    Renamed from `_SCREAMING_SNAKE_CASE`, which collided with the test class above;
    locals restored (the original referenced an unbound `output`).
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of hidden states recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 447 |
from math import sqrt
def _lowerCAmelCase ( __lowerCamelCase : int = 1000000 ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(__lowerCamelCase , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
    # The solver above is defined as `_lowerCAmelCase`; the original called an
    # undefined `solution()`.
    print(f"{_lowerCAmelCase() = }")
| 447 | 1 |
import requests
from bsa import BeautifulSoup
def lowerCamelCase(UpperCamelCase: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape worldometers and return {statistic title: value} as strings.

    Restored from obfuscation: the first two findAll results were bound to a
    throwaway name while the `+=` lines read unbound `keys`/`values`.
    NOTE(review): the module imports BeautifulSoup from `bsa`, which looks like a
    mangled `bs4` -- confirm the import block.
    """
    soup = BeautifulSoup(requests.get(UpperCamelCase).text, 'html.parser')
    keys = soup.findAll('h1')
    values = soup.findAll('div', {'class': 'maincounter-number'})
    keys += soup.findAll('span', {'class': 'panel-title'})
    values += soup.findAll('div', {'class': 'number-table-main'})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
    print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
    # The scraper above is defined as `lowerCamelCase`; the original called an
    # undefined `world_covidaa_stats()`. (Trailing dataset junk removed from the
    # last line.)
    for key, value in lowerCamelCase().items():
        print(F'''{key}\n{value}\n''')
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Configuration holder for the DETA image-processor tests.

    Restored from obfuscation: the class was named `_UpperCAmelCase` (shadowed by
    the test class below, and never matching the `DetaImageProcessingTester(self)`
    reference there), and `__init__` declared a dozen parameters all named
    `lowercase_` -- a SyntaxError.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],  # never mutated, so the shared default is safe here
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build a DetaImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor is expected to resize to.

        For a batch, recurse per image and take the per-axis maxima (padding size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class _UpperCAmelCase ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    """Test suite for DetaImageProcessor: property presence, size config,
    pixel-value shapes for PIL / numpy / torch inputs, and slow integration
    checks against COCO detection and panoptic annotations.

    NOTE(review): the names in this class are obfuscated. Several assignments
    bind results to throwaway names (`UpperCAmelCase`, `__SCREAMING_SNAKE_CASE`)
    while later code reads `self.image_processing_class` /
    `self.image_processor_tester` / `self.image_processor_dict`, which this
    class never defines -- confirm against the upstream file.
    """

    # NOTE(review): read elsewhere as `image_processing_class`; this obfuscated
    # attribute name never provides it.
    __SCREAMING_SNAKE_CASE : Union[str, Any] = DetaImageProcessor if is_vision_available() else None

    # setUp-style hook; the tester instance is created but bound to a throwaway name.
    def a_ ( self ) -> List[Any]:
        UpperCAmelCase = DetaImageProcessingTester(self )

    # Property exposing the tester's processor kwargs.
    @property
    def a_ ( self ) -> Optional[Any]:
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor exposes every configured property.
    def a_ ( self ) -> Dict:
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
        self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
        self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
        self.assertTrue(hasattr(lowercase_ , 'do_resize' ) )
        self.assertTrue(hasattr(lowercase_ , 'do_rescale' ) )
        self.assertTrue(hasattr(lowercase_ , 'do_pad' ) )
        self.assertTrue(hasattr(lowercase_ , 'size' ) )

    # Building from a dict preserves the size/pad configuration.
    def a_ ( self ) -> int:
        UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , lowercase_ )

    def a_ ( self ) -> int:
        pass

    # Shapes of pixel_values for PIL image inputs, unbatched and batched.
    def a_ ( self ) -> Union[str, Any]:
        # Initialize image_processing
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , Image.Image )
        # Test not batched input
        UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
        UpperCAmelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # Same as above for numpy array inputs.
    def a_ ( self ) -> Tuple:
        # Initialize image_processing
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , np.ndarray )
        # Test not batched input
        UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
        UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # Same as above for torch tensor inputs.
    def a_ ( self ) -> Optional[Any]:
        # Initialize image_processing
        UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
        for image in image_inputs:
            self.assertIsInstance(lowercase_ , torch.Tensor )
        # Test not batched input
        UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
        UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        UpperCAmelCase = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
        UpperCAmelCase , UpperCAmelCase = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    # Slow end-to-end check: encode a COCO detection sample and compare every
    # field of the encoding against recorded reference values.
    @slow
    def a_ ( self ) -> str:
        # prepare image and target
        UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
            UpperCAmelCase = json.loads(f.read() )
        UpperCAmelCase = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        UpperCAmelCase = DetaImageProcessor()
        UpperCAmelCase = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors='pt' )
        # verify pixel values
        UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
        UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) )
        # verify area
        UpperCAmelCase = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
        # verify boxes
        UpperCAmelCase = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
        UpperCAmelCase = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1E-3 ) )
        # verify image_id
        UpperCAmelCase = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
        # verify is_crowd
        UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
        # verify class_labels
        UpperCAmelCase = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
        # verify orig_size
        UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
        # verify size
        UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )

    # Slow end-to-end check for the COCO panoptic format, including masks.
    @slow
    def a_ ( self ) -> Any:
        # prepare image, target and masks_path
        UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
            UpperCAmelCase = json.loads(f.read() )
        UpperCAmelCase = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        UpperCAmelCase = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
        # encode them
        UpperCAmelCase = DetaImageProcessor(format='coco_panoptic' )
        UpperCAmelCase = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors='pt' )
        # verify pixel values
        UpperCAmelCase = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding['pixel_values'].shape , lowercase_ )
        UpperCAmelCase = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) )
        # verify area
        UpperCAmelCase = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , lowercase_ ) )
        # verify boxes
        UpperCAmelCase = torch.Size([6, 4] )
        self.assertEqual(encoding['labels'][0]['boxes'].shape , lowercase_ )
        UpperCAmelCase = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , lowercase_ , atol=1E-3 ) )
        # verify image_id
        UpperCAmelCase = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , lowercase_ ) )
        # verify is_crowd
        UpperCAmelCase = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , lowercase_ ) )
        # verify class_labels
        UpperCAmelCase = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , lowercase_ ) )
        # verify masks
        UpperCAmelCase = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() , lowercase_ )
        # verify orig_size
        UpperCAmelCase = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , lowercase_ ) )
        # verify size
        UpperCAmelCase = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , lowercase_ ) )
| 373 | 0 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Module constants restored to the names the functions below actually read; the
# original bound all five to the same throwaway name `_lowerCamelCase`, so
# `PATH_TO_TRANSFORMERS`, `_re_checkpoint`, `CONFIG_MAPPING` and the ignore set
# were all undefined.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes intentionally exempt from the docstring-checkpoint check.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first docstring checkpoint whose link matches its hub name, or None.

    Renamed from `_UpperCAmelCase` (which was shadowed by the next function and
    never matched the call site below); locals restored -- the original read
    `checkpoints` and wrote only throwaway names.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing config classes whose docstring lacks a valid checkpoint.

    Renamed from `_UpperCAmelCase` to match the `__main__` call below; locals
    restored (the original appended to an unbound `configs_without_checkpoint`).
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
    # Entry point: raises if any config docstring is missing a valid checkpoint link.
    check_config_docstrings_have_checkpoints()
| 703 |
def is_ip_va_address_valid(ip_va_address: str) -> bool:
    """Return True if *ip_va_address* is a dotted-quad IPv4 address.

    Fixes restored/repaired:
    - renamed from `_UpperCAmelCase` to match the `__main__` caller below;
    - the original range check did `int(<whole address string>)`, raising
      ValueError for every real address, instead of converting each octet;
    - valid octets go up to 255, not 254.
    """
    octets = [int(part) for part in ip_va_address.split(".") if part.isdigit()]
    return len(octets) == 4 and all(0 <= octet <= 255 for octet in octets)
if __name__ == "__main__":
    # Locals restored: the original bound both values to `_lowerCamelCase`
    # while reading unbound `ip` / `valid_or_invalid`.
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip) else "invalid"
    print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 196 | 0 |
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# Restored names: both constants were bound to `A_`, while the trainer class
# below reads `logger` and `arg_to_scheduler`.
logger = logging.get_logger(__name__)

# Maps the --lr_scheduler CLI value to the schedule factory to call.
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    "constant": get_constant_schedule,
    "constant_w_warmup": get_constant_schedule_with_warmup,
}
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , *__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ):
super().__init__(*__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if config is None:
assert isinstance(self.model , __SCREAMING_SNAKE_CASE ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f" {self.model.__class__}"
)
snake_case__ : Optional[Any] = self.model.config
else:
snake_case__ : int = config
snake_case__ : List[str] = data_args
snake_case__ : Union[str, Any] = self.config.tgt_vocab_size if isinstance(self.config , __SCREAMING_SNAKE_CASE ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
""" padding..""" )
if self.args.label_smoothing == 0:
snake_case__ : Union[str, Any] = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
snake_case__ : Optional[int] = label_smoothed_nll_loss
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
if self.optimizer is None:
snake_case__ : Tuple = ["""bias""", """LayerNorm.weight"""]
snake_case__ : Tuple = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
snake_case__ : Dict = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
snake_case__ : str = Adafactor
snake_case__ : Dict = {"""scale_parameter""": False, """relative_step""": False}
else:
snake_case__ : Union[str, Any] = AdamW
snake_case__ : Dict = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
snake_case__ : Union[str, Any] = self.args.learning_rate
if self.sharded_ddp:
snake_case__ : Dict = OSS(
params=__SCREAMING_SNAKE_CASE , optim=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
else:
snake_case__ : Tuple = optimizer_cls(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
if self.lr_scheduler is None:
snake_case__ : Union[str, Any] = self._get_lr_scheduler(__SCREAMING_SNAKE_CASE )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE ):
snake_case__ : List[str] = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
snake_case__ : Optional[int] = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
snake_case__ : int = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
snake_case__ : str = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__SCREAMING_SNAKE_CASE )
return scheduler
def __UpperCamelCase ( self ):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
snake_case__ : str = model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
snake_case__ : Union[str, Any] = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
snake_case__ , snake_case__ : Tuple = model(**__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[:2]
else:
# compute label smoothed loss
snake_case__ : Optional[Any] = model(**__SCREAMING_SNAKE_CASE , use_cache=__SCREAMING_SNAKE_CASE )[0]
snake_case__ : str = torch.nn.functional.log_softmax(__SCREAMING_SNAKE_CASE , dim=-1 )
snake_case__ , snake_case__ : str = self.loss_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
snake_case__ : Dict = inputs.pop("""labels""" )
snake_case__ , snake_case__ : Optional[int] = self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return loss
def __UpperCamelCase ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , ):
snake_case__ : List[str] = self._prepare_inputs(__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
snake_case__ : List[str] = self.model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , **__SCREAMING_SNAKE_CASE , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
snake_case__ : Optional[int] = self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
snake_case__ : List[Any] = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
snake_case__ , snake_case__ : str = self._compute_loss(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
snake_case__ : Tuple = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
snake_case__ : List[Any] = self._pad_tensors_to_max_len(__SCREAMING_SNAKE_CASE , gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def _pad_tensors_to_max_len(self, tensor, max_length):
    """Right-pad a 2-D token tensor to `max_length` along its last dimension.

    Uses `config.pad_token_id`, falling back to `config.eos_token_id`; raises
    ValueError if neither is defined. Method name restored from its call sites
    (`self._pad_tensors_to_max_len(...)` in `prediction_step`).
    """
    # If PAD token is not defined at least EOS token has to be defined
    pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
    if pad_token_id is None:
        raise ValueError(
            "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
            f" padded to `max_length`={max_length}"
        )
    padded_tensor = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    # Copy the original values into the left part of the padded buffer; the
    # obfuscated original destroyed this slice-assignment target.
    padded_tensor[:, : tensor.shape[-1]] = tensor
    return padded_tensor
| 38 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __snake_case(unittest.TestCase):
    """Unit tests for `DisjunctiveConstraint`.

    NOTE(review): the obfuscated original named all four tests identically
    (so only one survived) and unpacked `dc.update(...)` into a single
    repeated name, leaving `stepped`/`completed`/`reset` undefined. Distinct
    `test_*` names and the tuple-unpack targets are restored here.
    """

    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(stepped is True and completed is False and reset is False)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        self.assertTrue(stepped is True and completed is True and reset is False)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 38 | 1 |
from abc import ABC, abstractmethod
from typing import List, Optional
class lowercase__(ABC):
    """Abstract base class for constraints applied during generation.

    Subclasses must implement `advance`, `does_advance`, `update`, `reset`,
    `remaining` and `copy`. Instantiation immediately runs `test()`, a
    self-consistency check that repeatedly advances the constraint to
    completion.

    NOTE(review): the obfuscated base `snake_case__` is restored to `ABC`
    (imported at the top of the file); local names in `test()` were destroyed
    and are restored from their read sites (`completed`, `counter`, `advance`).
    """

    def __init__(self):
        # Sanity-check the subclass implementation right away.
        self.test()

    def test(self):
        """Verify that advancing the constraint step by step eventually completes it."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token(s) that would advance this constraint one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id):
        """Return True if generating `token_id` makes progress on the constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id):
        """Advance internal state with `token_id`; return (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Reset progress to the initial state."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return how many steps are still needed to complete the constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; when `stateful`, the copy carries current progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class lowercase__(snake_case__):
    """Constraint that forces an exact token sequence (a phrase) to appear.

    NOTE(review): base `snake_case__` is the abstract Constraint class above
    (name destroyed by obfuscation); attribute names are restored from their
    read sites (`self.token_ids`, `self.seqlen`, `self.fulfilled_idx`,
    `self.completed`).
    """

    def __init__(self, token_ids):
        super().__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Return the next expected token, or None once the phrase is complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id):
        """True iff `token_id` is exactly the next token of the phrase."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        """Advance on a match; otherwise reset. Returns (stepped, completed, reset)."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class lowercase__:
    """Prefix trie over a list of token sequences, used to track which tokens
    may legally follow a partial sequence for a disjunctive constraint.

    NOTE(review): trie-building local names were destroyed by obfuscation and
    are restored from read sites (`self.trie`, `self.max_height`); method
    names are restored from their call sites in DisjunctiveConstraint below
    (`next_tokens`, `reached_leaf`) and within this class (`count_leaves`,
    `has_subsets`).
    """

    def __init__(self, nested_token_ids, no_subsets=True):
        # Longest branch = maximum number of steps a constraint may need.
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """Return the tokens that may follow `current_seq` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """True iff `current_seq` fully matches one of the stored sequences."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Recursively count leaves under `root` (one leaf per stored sequence)."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        # If sequences collapse into fewer leaves than inputs, one was a prefix
        # (complete subset) of another.
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class lowercase__(snake_case__):
    """Constraint fulfilled by generating any ONE of several token sequences.

    NOTE(review): base `snake_case__` is the abstract Constraint class above;
    attribute names restored from read sites (`self.trie`, `self.token_ids`,
    `self.seqlen`, `self.current_seq`, `self.completed`).
    """

    def __init__(self, nested_token_ids):
        super().__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """Return the list of tokens that can extend the current sequence, or None."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id):
        """Append on a legal token, otherwise reset; returns (stepped, completed, reset)."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()
        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class lowercase__:
    """Tracks progress over a list of Constraint objects for one beam.

    Constraints live in three buckets: `complete_constraints` (done),
    `inprogress_constraint` (at most one, partially fulfilled) and
    `pending_constraints` (untouched copies).

    NOTE(review): attribute and local names were destroyed by obfuscation and
    are restored from their read sites throughout this class.
    """

    def __init__(self, constraints):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score fulfillment progress: completed constraints plus partial credit."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return all tokens that would advance some constraint, or None."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids):
        """Re-derive the whole state by replaying `token_ids` from scratch."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id):
        """Feed one generated token into the state; returns (complete, stepped)."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 706 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass(frozen=True)
class lowercase__:
    """A single train/test example for the HANS dataset.

    NOTE(review): the obfuscated original collapsed all five fields into a
    single `snake_case__` name; field names are restored from the keyword
    construction site below
    (`InputExample(guid=..., text_a=..., text_b=..., label=..., pairID=...)`).
    `frozen=True` restored from the destroyed `frozen=snake_case__` argument —
    confirm upstream.
    """

    guid: str                       # unique example id, e.g. "train-12"
    text_a: str                     # premise sentence
    text_b: Optional[str] = None    # hypothesis sentence
    label: Optional[str] = None     # gold label string
    pairID: Optional[str] = None    # HANS pair identifier
@dataclass(frozen=True)
class lowercase__:
    """Tokenized features for one HANS example.

    NOTE(review): field names restored from the construction site below
    (`InputFeatures(**inputs, label=..., pairID=...)`, where `inputs` is a
    tokenizer output containing input_ids/attention_mask/token_type_ids);
    `frozen=True` restored from the destroyed `frozen=snake_case__` argument.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowercase__(snake_case__):
    """PyTorch dataset for HANS with on-disk feature caching.

    NOTE(review): parameter/attribute names were destroyed by obfuscation and
    are restored from their read sites (`self.features`, `self.label_list`);
    the base `snake_case__` is presumed to be `torch.utils.data.Dataset`
    (imported above) — confirm upstream.
    """

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir,
        tokenizer,
        task,
        max_seq_length=None,
        overwrite_cache=False,
        evaluate=False,
    ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class lowercase__:
    """TensorFlow dataset for HANS, backed by a generator over features.

    NOTE(review): attribute/local names restored from read sites; the
    obfuscated dtype `tf.intaa` (digits destroyed) is restored as `tf.int32`
    for the input tensors and `tf.int64` for the label, matching the upstream
    `utils_hans.py` — confirm against that source.
    """

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir,
        tokenizer,
        task,
        max_seq_length=128,
        overwrite_cache=False,
        evaluate=False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(examples)))
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        return self.features[i]

    def get_labels(self):
        return self.label_list
class lowercase__(snake_case__):
    """Processor for the HANS data set (tab-separated heuristics files).

    NOTE(review): base `snake_case__` is presumed `DataProcessor` (imported
    above), which supplies `_read_tsv`; method names restored from their call
    sites in the dataset classes (`get_labels`, `get_train_examples`,
    `get_dev_examples`).
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Create InputExamples from parsed TSV rows (column layout per HANS files)."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                # skip the header row
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Tokenize `examples` into a list of InputFeatures.

    NOTE(review): the obfuscated original named all four parameters `A_`
    (a SyntaxError); the function name is restored from its call sites in the
    dataset classes above.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        # unknown labels fall back to index 0
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
# NOTE(review): the obfuscated original assigned both dicts to the same name
# (`__UpperCAmelCase`), so the second clobbered the first. `hans_processors`
# is restored from its lookup site `hans_processors[task]()` above;
# `hans_tasks_num_labels` follows the upstream naming — confirm against callers.
hans_tasks_num_labels = {
    "hans": 3,
}
hans_processors = {
    "hans": HansProcessor,
}
| 582 | 0 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a, b, c):
    """Return both roots of a*x^2 + b*x + c = 0.

    Purely real roots are returned as floats; complex roots as complex
    numbers (cmath.sqrt handles a negative discriminant). Raises ValueError
    when `a` is zero.

    NOTE(review): the obfuscated original assigned both roots to the same
    name and returned one root twice; the two roots and the function's
    parameter names are restored (grounded by the `quadratic_roots(a=..., b=..., c=...)`
    call in `main`).
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    """Demonstrate quadratic_roots on 5x^2 + 6x + 1 = 0.

    NOTE(review): the obfuscated original printed the same variable twice;
    the two distinct solutions are restored. Function name grounded by the
    `main()` call in the entry-point guard below.
    """
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
| 65 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    """Configuration fixture for the BeitImageProcessor tests below.

    NOTE(review): the obfuscated original named all 13 parameters `_a`
    (a SyntaxError); names are restored from the attribute read sites in the
    test class. The class name is grounded by its construction site
    (`BeitImageProcessingTester(self)`). The list defaults are shared objects
    but are only read, never mutated, here.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a BeitImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def A():
    """Load one (image, segmentation-map) pair from the ADE20k test fixtures.

    NOTE(review): requires the `datasets` library and hub/cache access. The
    module defines two functions named ``A`` (obfuscation artifact), so this
    one is shadowed by the next definition — confirm intended names upstream.
    Destroyed local names restored from the return statement's shape.
    """
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    seg_map = Image.open(dataset[1]["file"])
    return image, seg_map
def A():
    """Load two (image, segmentation-map) pairs from the ADE20k test fixtures.

    NOTE(review): requires the `datasets` library and hub/cache access; see
    the duplicate-name note on the previous function. Destroyed local names
    restored (the obfuscation also collapsed `image1/image2` into `imagea`).
    """
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = BeitImageProcessor if is_vision_available() else None
def setUp(self) -> None:
    # NOTE(review): restored the unittest fixture hook name (obfuscated as
    # `__a`); the tester is consumed by sibling tests via
    # `self.image_processor_tester` — that attribute name is grounded by
    # those read sites.
    self.image_processor_tester = BeitImageProcessingTester(self)
@property
def image_processor_dict(self):
    # Restored property name: sibling tests read `self.image_processor_dict`.
    return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
    """The processor exposes every expected configuration attribute.

    NOTE(review): the obfuscated original passed the undefined name `_a` to
    every `hasattr`; the constructed processor instance is restored as the
    subject.
    """
    image_processing = self.image_processing_class(**self.image_processor_dict)
    self.assertTrue(hasattr(image_processing, "do_resize"))
    self.assertTrue(hasattr(image_processing, "size"))
    self.assertTrue(hasattr(image_processing, "do_center_crop"))
    self.assertTrue(hasattr(image_processing, "center_crop"))
    self.assertTrue(hasattr(image_processing, "do_normalize"))
    self.assertTrue(hasattr(image_processing, "image_mean"))
    self.assertTrue(hasattr(image_processing, "image_std"))
def test_image_processor_from_dict_with_kwargs(self):
    """`from_dict` honors defaults and kwarg overrides (size/crop_size/reduce_labels).

    NOTE(review): the obfuscated `_a` flag values are restored as
    False (defaults) / True (override) — confirm against the upstream test.
    """
    image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
    self.assertEqual(image_processor.size, {"height": 20, "width": 20})
    self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
    self.assertEqual(image_processor.do_reduce_labels, False)

    image_processor = self.image_processing_class.from_dict(
        self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
    )
    self.assertEqual(image_processor.size, {"height": 42, "width": 42})
    self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    self.assertEqual(image_processor.do_reduce_labels, True)
def __a(self) -> str:
    """Intentional no-op placeholder test; performs no checks."""
    return None
def test_call_pil(self):
    """PIL input: single and batched images produce cropped pixel tensors.

    NOTE(review): destroyed locals (`image_processing`, `image_inputs`,
    `encoded_images`) and the `equal_resolution=False` flag are restored.
    """
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random PIL images
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
    for image in image_inputs:
        self.assertIsInstance(image, Image.Image)

    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )

    # Test batched
    encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
def __a ( self ) -> Dict:
    """NumPy inputs must be resized/cropped to ``crop_size`` as (B, C, H, W).

    Bug fix: the obfuscated original read the undefined name ``_a`` for every
    argument (NameError); the intended locals and flags are restored.
    """
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random numpy tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
    for image in image_inputs:
        self.assertIsInstance(image, np.ndarray)

    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )

    # Test batched
    encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
def __a ( self ) -> Tuple:
    """Torch inputs must be resized/cropped to ``crop_size`` as (B, C, H, W).

    Bug fix: the obfuscated original read the undefined name ``_a`` for every
    argument (NameError); the intended locals and flags are restored.
    """
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random PyTorch tensors
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
    for image in image_inputs:
        self.assertIsInstance(image, torch.Tensor)

    # Test not batched input
    encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )

    # Test batched
    encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
    self.assertEqual(
        encoded_images.shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
def __a ( self ) -> List[str]:
    """Segmentation maps are cropped with the images and returned as long tensors.

    Bug fix: the obfuscated original read the undefined name ``_a`` for every
    argument (NameError); the intended locals and flags are restored.
    """
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict)
    # create random PyTorch tensors plus one all-zero segmentation map per image
    image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
    maps = []
    for image in image_inputs:
        self.assertIsInstance(image, torch.Tensor)
        maps.append(torch.zeros(image.shape[-2:]).long())

    # Test not batched input
    encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
    self.assertEqual(
        encoding["pixel_values"].shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(
        encoding["labels"].shape,
        (
            1,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(encoding["labels"].dtype, torch.long)
    self.assertTrue(encoding["labels"].min().item() >= 0)
    self.assertTrue(encoding["labels"].max().item() <= 255)

    # Test batched
    encoding = image_processing(image_inputs, maps, return_tensors="pt")
    self.assertEqual(
        encoding["pixel_values"].shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(
        encoding["labels"].shape,
        (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(encoding["labels"].dtype, torch.long)
    self.assertTrue(encoding["labels"].min().item() >= 0)
    self.assertTrue(encoding["labels"].max().item() <= 255)

    # Test not batched input (PIL images)
    image, segmentation_map = prepare_semantic_single_inputs()
    encoding = image_processing(image, segmentation_map, return_tensors="pt")
    self.assertEqual(
        encoding["pixel_values"].shape,
        (
            1,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(
        encoding["labels"].shape,
        (
            1,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(encoding["labels"].dtype, torch.long)
    self.assertTrue(encoding["labels"].min().item() >= 0)
    self.assertTrue(encoding["labels"].max().item() <= 255)

    # Test batched input (PIL images)
    images, segmentation_maps = prepare_semantic_batch_inputs()
    encoding = image_processing(images, segmentation_maps, return_tensors="pt")
    self.assertEqual(
        encoding["pixel_values"].shape,
        (
            2,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(
        encoding["labels"].shape,
        (
            2,
            self.image_processor_tester.crop_size["height"],
            self.image_processor_tester.crop_size["width"],
        ),
    )
    self.assertEqual(encoding["labels"].dtype, torch.long)
    self.assertTrue(encoding["labels"].min().item() >= 0)
    self.assertTrue(encoding["labels"].max().item() <= 255)
def __a ( self ) -> Any:
    """Labels stay in [0, 150] by default and in [0, 255] with label reduction.

    Bug fix: the obfuscated original assigned ``True`` to a throwaway local
    instead of enabling reduction on the processor, and read the undefined
    name ``_a`` (NameError); the intended behavior is restored.
    """
    # Initialize image_processing
    image_processing = self.image_processing_class(**self.image_processor_dict)

    # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
    image, segmentation_map = prepare_semantic_single_inputs()
    encoding = image_processing(image, segmentation_map, return_tensors="pt")
    self.assertTrue(encoding["labels"].min().item() >= 0)
    self.assertTrue(encoding["labels"].max().item() <= 150)

    # With reduction enabled the background (0) is remapped to 255.
    image_processing.do_reduce_labels = True
    encoding = image_processing(image, segmentation_map, return_tensors="pt")
    self.assertTrue(encoding["labels"].min().item() >= 0)
    self.assertTrue(encoding["labels"].max().item() <= 255)
| 122 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
# Checkpoint key suffixes whose tensors the merge loop below averages across
# tensor-parallel ranks (summed, then divided — see the `key.endswith(...)`
# branches in the conversion function).
# NOTE(review): both constants are bound to the same obfuscated name
# `UpperCamelCase__`, so the second binding shadows the first, and the code
# below reads `WEIGHTS_TO_AVERAGE_ENDSWITH` / 
# `WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN`, which are never defined here —
# renaming damage; restore the original constant names before use.
UpperCamelCase__ : Tuple = [
    "word_embeddings_layernorm.weight",
    "word_embeddings_layernorm.bias",
    "input_layernorm.weight",
    "input_layernorm.bias",
    "post_attention_layernorm.weight",
    "post_attention_layernorm.bias",
    "self_attention.dense.bias",
    "mlp.dense_4h_to_h.bias",
    "ln_f.weight",
    "ln_f.bias",
]
# Keys containing these substrings are row-parallel in Megatron-DeepSpeed and
# are concatenated along dim 1 when merging (everything else along dim 0).
UpperCamelCase__ : Dict = [
    "mlp.dense_4h_to_h.weight",
    "self_attention.dense.weight",
]
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
_SCREAMING_SNAKE_CASE = int(re.match(r""".*layer_(\d*).*""" , a_ )[1] )
layer_number -= 3
return F"h.{layer_number}." + key
def lowerCAmelCase_(dtype) -> int:
    """Return the per-element size in bytes of a torch ``dtype``.

    Bug fix: the obfuscated original ignored its parameter and read the
    undefined name ``a_`` (NameError on every call).

    Args:
        dtype: a ``torch.dtype`` (e.g. ``torch.float32``).

    Returns:
        Size in bytes; ``torch.bool`` is accounted as a packed bit (1/8).

    Raises:
        ValueError: if the dtype's string form does not end in a bit width.
    """
    if dtype == torch.bool:
        return 1 / 8
    # str(torch.float32) == "torch.float32" — grab the trailing bit width.
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
    # Merges Megatron-DeepSpeed BLOOM tensor-parallel shards into a
    # transformers checkpoint, either sharded (per-file .bin + index.json)
    # or as one state dict loaded into a BloomModel and saved once.
    #
    # NOTE(review): obfuscation damage — all five parameters share one name
    # (a SyntaxError in Python), and the body reads names that are never
    # bound here (`a_`, `s`, `temp`, `tensors`, `keys`, `config`, `value`,
    # `index_dict`, `total_size`, `model`, `other_keys`, `missing_keys`,
    # `pretraining_tp`, `bloom_config_file`, `shard_model`,
    # `pytorch_dump_folder_path`). Presumably the original signature was
    # (bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path,
    # shard_model, pretraining_tp) — confirm against the original script
    # before relying on any of this.
    """simple docstring"""
    if bloom_config_file == "":
        # No config file given: fall back to the default BloomConfig.
        _SCREAMING_SNAKE_CASE = BloomConfig()
    else:
        _SCREAMING_SNAKE_CASE = BloomConfig.from_json_file(a_ )
    if shard_model:
        # Sharded output: one pytorch_model_XXXXX-of-YYYYY.bin per layer file.
        _SCREAMING_SNAKE_CASE = os.listdir(a_ )
        _SCREAMING_SNAKE_CASE = sorted(filter(lambda SCREAMING_SNAKE_CASE_ : s.startswith("""layer""" ) and "model_00" in s , a_ ) )
        _SCREAMING_SNAKE_CASE = {'''weight_map''': {}, '''metadata''': {}}
        _SCREAMING_SNAKE_CASE = 0
        _SCREAMING_SNAKE_CASE = None
        _SCREAMING_SNAKE_CASE = BloomConfig()
        for j, file in enumerate(a_ ):
            print("""Processing file: {}""".format(a_ ) )
            _SCREAMING_SNAKE_CASE = None
            for i in range(a_ ):
                # load all TP files
                _SCREAMING_SNAKE_CASE = file.replace("""model_00""" , F"model_0{i}" )
                _SCREAMING_SNAKE_CASE = torch.load(os.path.join(a_ , a_ ) , map_location="""cpu""" )
                # Rename keys in the transformers names
                _SCREAMING_SNAKE_CASE = list(temp.keys() )
                for key in keys:
                    _SCREAMING_SNAKE_CASE = temp.pop(a_ )
                if tensors is None:
                    _SCREAMING_SNAKE_CASE = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            _SCREAMING_SNAKE_CASE = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            _SCREAMING_SNAKE_CASE = torch.cat([tensors[key], temp[key]] , dim=a_ )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    _SCREAMING_SNAKE_CASE = tensors[key] / pretraining_tp
            torch.save(
                a_ , os.path.join(
                    a_ , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(a_ ) ).zfill(5 ) ) , ) , )
            for key in tensors.keys():
                _SCREAMING_SNAKE_CASE = tensors[key]
                # Track total byte size and which shard each key lives in.
                total_size += value.numel() * get_dtype_size(value.dtype )
                if key not in index_dict["weight_map"]:
                    _SCREAMING_SNAKE_CASE = '''pytorch_model_{}-of-{}.bin'''.format(
                        str(j + 1 ).zfill(5 ) , str(len(a_ ) ).zfill(5 ) )
        _SCREAMING_SNAKE_CASE = BloomConfig()
        _SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        _SCREAMING_SNAKE_CASE = total_size
        with open(a_ , """w""" , encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
        with open(os.path.join(a_ , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
            _SCREAMING_SNAKE_CASE = json.dumps(a_ , indent=2 , sort_keys=a_ ) + '''\n'''
            f.write(a_ )
    else:
        # Single-file output: accumulate the full state dict, load it into a
        # BloomModel, and save model + config once at the end.
        _SCREAMING_SNAKE_CASE = BloomModel(a_ )
        _SCREAMING_SNAKE_CASE = os.listdir(a_ )
        _SCREAMING_SNAKE_CASE = sorted(filter(lambda SCREAMING_SNAKE_CASE_ : s.startswith("""layer""" ) and "model_00" in s , a_ ) )
        _SCREAMING_SNAKE_CASE = None
        for i, file in enumerate(a_ ):
            _SCREAMING_SNAKE_CASE = None
            for i in range(a_ ):
                # load all TP files
                _SCREAMING_SNAKE_CASE = file.replace("""model_00""" , F"model_0{i}" )
                _SCREAMING_SNAKE_CASE = torch.load(os.path.join(a_ , a_ ) , map_location="""cpu""" )
                # Rename keys in the transformers names
                _SCREAMING_SNAKE_CASE = list(temp.keys() )
                for key in keys:
                    _SCREAMING_SNAKE_CASE = temp.pop(a_ )
                if tensors is None:
                    _SCREAMING_SNAKE_CASE = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            _SCREAMING_SNAKE_CASE = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights accross TP ranks
                            _SCREAMING_SNAKE_CASE = torch.cat([tensors[key], temp[key]] , dim=a_ )
            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(a_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    _SCREAMING_SNAKE_CASE = tensors[key] / pretraining_tp
            _SCREAMING_SNAKE_CASE = model.load_state_dict(a_ , strict=a_ )
            assert not other_keys.unexpected_keys, F"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                _SCREAMING_SNAKE_CASE = set(other_keys.missing_keys )
            else:
                _SCREAMING_SNAKE_CASE = missing_keys.intersection(set(other_keys.missing_keys ) )
        assert not missing_keys, F"The keys {missing_keys} are missing"
        # Save pytorch-model
        os.makedirs(a_ , exist_ok=a_ )
        _SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
        _SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
        print(F"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}" )
        if config.torch_dtype is not None:
            _SCREAMING_SNAKE_CASE = model.to(config.torch_dtype )
        torch.save(model.state_dict() , a_ )
        print(F"Save configuration file to {pytorch_config_dump_path}" )
        with open(a_ , """w""" , encoding="""utf-8""" ) as f:
            f.write(config.to_json_string() )
if __name__ == "__main__":
    # Command-line entry point for the BLOOM checkpoint conversion.
    # NOTE(review): obfuscation damage — the parser/args are assigned to
    # `UpperCamelCase__` but read through the undefined names `parser` and
    # `args`, and `convert_bloom_checkpoint_to_pytorch` is not defined in
    # this file (the function above is named `lowerCAmelCase_`). Restore the
    # original names before running.
    UpperCamelCase__ : Any = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bloom_checkpoint_path",
        default=None,
        type=str,
        required=True,
        help="Path to the Megatron-LM checkpoint path.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--bloom_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--shard_model",
        action="store_true",
        help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
    )
    parser.add_argument(
        "--pretraining_tp",
        default=4,
        type=int,
        help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
    )
    UpperCamelCase__ : str = parser.parse_args()
    convert_bloom_checkpoint_to_pytorch(
        args.bloom_checkpoint_path,
        args.bloom_config_file,
        args.pytorch_dump_folder_path,
        args.shard_model,
        args.pretraining_tp,
    )
| 717 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _a (unittest.TestCase):
    """Tests for the framework-agnostic tensor helpers in ``transformers.utils``
    (flatten_dict, transpose, reshape, squeeze, expand_dims) against NumPy,
    PyTorch, TensorFlow and JAX inputs.

    Bug fix: the obfuscated original referenced the undefined name ``A__`` in
    every assertion (NameError) and collapsed distinct locals into one name;
    working locals are restored here. The (duplicated) obfuscated method names
    are kept unchanged.
    """

    def UpperCamelCase ( self ) -> Optional[Any]:
        # flatten_dict turns nested dicts into a flat dict with dotted keys.
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }
        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def UpperCamelCase ( self ) -> int:
        # transpose on plain NumPy arrays must match ndarray.transpose.
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def UpperCamelCase ( self ) -> Optional[Any]:
        # transpose on torch tensors must agree with the NumPy result.
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def UpperCamelCase ( self ) -> Optional[int]:
        # transpose on TF constants must agree with the NumPy result.
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def UpperCamelCase ( self ) -> List[str]:
        # transpose on JAX arrays must agree with the NumPy result.
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def UpperCamelCase ( self ) -> Optional[int]:
        # reshape on plain NumPy arrays must match np.reshape.
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))
        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def UpperCamelCase ( self ) -> Optional[int]:
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def UpperCamelCase ( self ) -> Tuple:
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))
        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def UpperCamelCase ( self ) -> List[Any]:
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))
        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def UpperCamelCase ( self ) -> Any:
        # squeeze on plain NumPy arrays must match np.squeeze.
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))
        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def UpperCamelCase ( self ) -> Any:
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def UpperCamelCase ( self ) -> List[str]:
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))
        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def UpperCamelCase ( self ) -> Optional[int]:
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))
        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def UpperCamelCase ( self ) -> Optional[Any]:
        # expand_dims on plain NumPy arrays must match np.expand_dims.
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def UpperCamelCase ( self ) -> Optional[int]:
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def UpperCamelCase ( self ) -> str:
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def UpperCamelCase ( self ) -> Any:
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
| 0 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the RemBERT model family.
#
# Bug fix: the obfuscated original (a) rebound the whole dict variable to a
# plain list in every try/else branch, destroying the structure, (b) passed
# `_import_structure` to `_LazyModule` although that name was never defined
# (NameError at import time), and (c) bound the lazy module to a variable
# instead of registering it in `sys.modules`. The standard transformers
# lazy-module layout is restored below.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 399 |
'''simple docstring'''
import math
def lowercase__ ( number: int ) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Proth numbers are k * 2**n + 1 with odd k < 2**n (OEIS A080075).

    Bug fixes vs. the obfuscated original: the type check called
    ``isinstance(x, x)`` (TypeError), the error messages referenced the
    undefined name ``number``, and both loop bounds used the input value
    instead of ``block_index`` / ``increment`` (``increment`` was computed
    but never used as a bound), yielding non-Proth values.

    Args:
        number: 1-based index into the Proth sequence.

    Raises:
        TypeError: if ``number`` is not an int.
        ValueError: if ``number`` < 1.
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # After [3, 5], Proth numbers come in blocks sharing the same power of
        # two: block b contributes `increment` values of the form 2**(b+1) + p.
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
    # Demo/driver: print the first Proth numbers and exercise the error path.
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            # Bug fix: this module defines ``lowercase__``; the previous call
            # target ``proth`` does not exist anywhere in this file.
            value = lowercase__(number)
        except ValueError:
            print(f'ValueError: there is no {number}th Proth number')
            continue
        print(f'The {number}th Proth number: {value}')
| 399 | 1 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="""session""" )
def snake_case ( ) -> List[str]:
    # Session-scoped fixture: a tiny 10-row in-memory `datasets.Dataset` with
    # tokens / labels / answers / id columns, shared across tests.
    # NOTE(review): obfuscation damage — `n`, `_A` and `dataset` are read but
    # never bound (every local is assigned to `UpperCamelCase_`); restore the
    # original local names before use.
    """simple docstring"""
    UpperCamelCase_ : Dict = 10
    UpperCamelCase_ : Dict = datasets.Features(
        {
            """tokens""": datasets.Sequence(datasets.Value("""string""" ) ),
            """labels""": datasets.Sequence(datasets.ClassLabel(names=["""negative""", """positive"""] ) ),
            """answers""": datasets.Sequence(
                {
                    """text""": datasets.Value("""string""" ),
                    """answer_start""": datasets.Value("""int32""" ),
                } ),
            """id""": datasets.Value("""int64""" ),
        } )
    UpperCamelCase_ : List[Any] = datasets.Dataset.from_dict(
        {
            """tokens""": [["""foo"""] * 5] * n,
            """labels""": [[1] * 5] * n,
            """answers""": [{"""answer_start""": [97], """text""": ["""1976"""]}] * 10,
            """id""": list(range(_A ) ),
        } , features=_A , )
    return dataset
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Optional[int] , a_ : Optional[int] ) -> Union[str, Any]:
    # Materializes the dataset fixture above into an .arrow cache file and
    # returns its path.
    # NOTE(review): duplicate parameter names `a_ , a_` are a SyntaxError and
    # `tmp_path_factory` / `dataset` / `_A` / `filename` are unbound here —
    # presumably the original signature was (dataset, tmp_path_factory).
    """simple docstring"""
    UpperCamelCase_ : Optional[int] = str(tmp_path_factory.mktemp("""data""" ) / """file.arrow""" )
    dataset.map(cache_file_name=_A )
    return filename
# FILE_CONTENT + files
UpperCamelCase ='''\
Text data.
Second line of data.'''
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt"""
UpperCamelCase_ : str = FILE_CONTENT
with open(_A , """w""" ) as f:
f.write(_A )
return filename
# Compressed-file fixtures: each writes FILE_CONTENT under one compression
# format and returns the resulting path.
# NOTE(review): obfuscation damage throughout this group — the imported
# module names look mechanically renamed (`bza` ~ bz2, `lza` ~ lz4,
# `pyazr` ~ py7zr), parameters are all `a_` (duplicated where there are two,
# which is a SyntaxError), and `_A`/`tmp_path_factory`/`path` are unbound.
# Confirm each against the original fixtures before use.
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Optional[int] ) -> Optional[int]:
    # bz2-compressed copy of FILE_CONTENT.
    """simple docstring"""
    import bza
    UpperCamelCase_ : List[str] = tmp_path_factory.mktemp("""data""" ) / """file.txt.bz2"""
    UpperCamelCase_ : List[Any] = bytes(_A , """utf-8""" )
    with bza.open(_A , """wb""" ) as f:
        f.write(_A )
    return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Tuple ) -> int:
    # gzip-compressed copy of FILE_CONTENT.
    """simple docstring"""
    import gzip
    UpperCamelCase_ : Union[str, Any] = str(tmp_path_factory.mktemp("""data""" ) / """file.txt.gz""" )
    UpperCamelCase_ : str = bytes(_A , """utf-8""" )
    with gzip.open(_A , """wb""" ) as f:
        f.write(_A )
    return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Dict ) -> Optional[Any]:
    # lz4-compressed copy, only when the optional lz4 dependency is present.
    """simple docstring"""
    if datasets.config.LZ4_AVAILABLE:
        import lza.frame
        UpperCamelCase_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.lz4"""
        UpperCamelCase_ : Dict = bytes(_A , """utf-8""" )
        with lza.frame.open(_A , """wb""" ) as f:
            f.write(_A )
        return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : str , a_ : Optional[Any] ) -> List[Any]:
    # 7z archive containing the text file, when py7zr is available.
    """simple docstring"""
    if datasets.config.PY7ZR_AVAILABLE:
        import pyazr
        UpperCamelCase_ : Optional[Any] = tmp_path_factory.mktemp("""data""" ) / """file.txt.7z"""
        with pyazr.SevenZipFile(_A , """w""" ) as archive:
            archive.write(_A , arcname=os.path.basename(_A ) )
        return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : str , a_ : List[Any] ) -> Union[str, Any]:
    # Uncompressed tar archive containing the text file.
    """simple docstring"""
    import tarfile
    UpperCamelCase_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.tar"""
    with tarfile.TarFile(_A , """w""" ) as f:
        f.add(_A , arcname=os.path.basename(_A ) )
    return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Any ) -> int:
    # xz (LZMA)-compressed copy of FILE_CONTENT.
    """simple docstring"""
    import lzma
    UpperCamelCase_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / """file.txt.xz"""
    UpperCamelCase_ : Tuple = bytes(_A , """utf-8""" )
    with lzma.open(_A , """wb""" ) as f:
        f.write(_A )
    return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Optional[Any] , a_ : Tuple ) -> Optional[Any]:
    # Zip archive containing the text file.
    """simple docstring"""
    import zipfile
    UpperCamelCase_ : Any = tmp_path_factory.mktemp("""data""" ) / """file.txt.zip"""
    with zipfile.ZipFile(_A , """w""" ) as f:
        f.write(_A , arcname=os.path.basename(_A ) )
    return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : Tuple ) -> int:
    # zstd-compressed copy, only when the optional zstandard dependency exists.
    """simple docstring"""
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd
        UpperCamelCase_ : Tuple = tmp_path_factory.mktemp("""data""" ) / """file.txt.zst"""
        UpperCamelCase_ : Any = bytes(_A , """utf-8""" )
        with zstd.open(_A , """wb""" ) as f:
            f.write(_A )
        return path
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : List[Any] ) -> List[Any]:
    # Writes a small TMX (translation memory XML) document to file.xml and
    # returns the path.
    # NOTE(review): `tmp_path_factory`, `_A` and `filename` are unbound here
    # (locals were renamed to `UpperCamelCase_`/`a_`) — obfuscation damage.
    """simple docstring"""
    UpperCamelCase_ : int = tmp_path_factory.mktemp("""data""" ) / """file.xml"""
    UpperCamelCase_ : Dict = textwrap.dedent(
        """\
    <?xml version=\"1.0\" encoding=\"UTF-8\" ?>
    <tmx version=\"1.4\">
      <header segtype=\"sentence\" srclang=\"ca\" />
      <body>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>""" )
    with open(_A , """w""" ) as f:
        f.write(_A )
    return filename
# Row/column test payloads for the CSV/JSON/SQL fixtures below.
# NOTE(review): all five constants are bound to the same obfuscated name
# `UpperCamelCase`, so each binding shadows the previous one, and the code
# below reads `DATA` / `DATA_DICT_OF_LISTS`, which are never defined —
# renaming damage; restore the original names (DATA, DATA2,
# DATA_DICT_OF_LISTS, DATA_312, DATA_STR or similar) before use.
UpperCamelCase =[
    {'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
UpperCamelCase =[
    {'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
    {'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
UpperCamelCase ={
    '''col_1''': ['''0''', '''1''', '''2''', '''3'''],
    '''col_2''': [0, 1, 2, 3],
    '''col_3''': [0.0, 1.0, 2.0, 3.0],
}
UpperCamelCase =[
    {'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
    {'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
UpperCamelCase =[
    {'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
    {'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
    {'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
    {'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope="""session""" )
def snake_case ( ) -> Any:
    # Returns the dict-of-lists payload defined above.
    """simple docstring"""
    return DATA_DICT_OF_LISTS
@pytest.fixture(scope="""session""" )
def snake_case ( a_ : List[Any] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Any = datasets.Dataset.from_dict(_A )
UpperCamelCase_ : Tuple = str(tmp_path_factory.mktemp("""data""" ) / """dataset.arrow""" )
dataset.map(cache_file_name=_A )
return path
@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    """Write DATA into a one-table SQLite database and return the file path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    # sqlite3 is the stdlib module; the original's `sqlitea` was a mangled name.
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            # Parameterized insert; item.values() order matches the column order.
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    """Write DATA as dataset.csv (header + 4 rows) and return the path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    """Write DATA as a second file, dataset2.csv, and return the path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    """Compress the csv_path fixture file with bz2 and return the new path."""
    import bz2  # stdlib; the original's `bza` was a mangled module name

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    """Zip both CSV fixture files at the archive root and return the path."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path
@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    """Zip both CSV files under uppercase .CSV archive names (extension-case tests)."""
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path
@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    """Zip both CSV files under a main_dir/ prefix and return the archive path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    """Write DATA as a Parquet file with an explicit schema and return the path."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    # pa.int64/pa.float64: the original's `pa.intaa`/`pa.floataa` were mangled names.
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        # Convert the list-of-rows DATA into the columnar form pyarrow expects.
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    """Write {"data": DATA} (rows as a list of dicts) to dataset.json."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    """Write {"data": DATA_DICT_OF_LISTS} (columnar form) to dataset.json."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    """Write DATA as JSON Lines (one object per line) to dataset.jsonl."""
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    """Write DATA as a second JSON Lines file, dataset2.jsonl."""
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    """Write DATA_312 (shuffled column order) as dataset_312.jsonl."""
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    """Write DATA_STR (string-valued col_1) as dataset-str.jsonl."""
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    """gzip-compress the text_path fixture file and return the .gz path."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    """gzip-compress the jsonl_path fixture file and return the .gz path."""
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip both JSONL fixture files at the archive root and return the path."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip the jsonl zip archive itself under a nested/ prefix (archive-in-archive)."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Zip both JSONL files under a main_dir/ prefix and return the archive path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path
@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    """Tar both JSONL fixture files at the archive root and return the path."""
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path
@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    """Tar the jsonl tar archive itself under a nested/ prefix (archive-in-archive)."""
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    """Write four lines ("0".."3") to dataset.txt and return the path."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    """Write the same four lines to a second file, dataset2.txt."""
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def text_path_with_custom_extension(tmp_path_factory):
    """Write the same four lines to dataset.abc (non-standard extension).

    NOTE(review): fixture name reconstructed; the obfuscated original named
    every fixture `snake_case` — confirm against callers if available.
    """
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path
@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    """Zip both text fixture files at the archive root and return the path."""
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path
@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    """Zip both text files under a main_dir/ prefix and return the archive path."""
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path
@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    """Zip the text files under archive names with an unsupported .ext extension."""
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path
@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    """Write text containing a U+2029 paragraph separator and return the path."""
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    """Return the repo-relative path of the RGB test image fixture."""
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")
@pytest.fixture(scope="session")
def audio_file():
    """Return the repo-relative path of the 44.1 kHz WAV test fixture."""
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")
@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    """Zip the test image twice (second copy renamed *2.jpg) and return the path."""
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        # Second entry: same bytes under a distinct name so the archive has two images.
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path
@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    """Build a data dir containing visible and hidden files/dirs; return its path.

    Layout: subdir/{train.txt,test.txt,.test.txt} plus a hidden .subdir/ with
    train.txt and test.txt — used to check that hidden entries are skipped.
    """
    data_dir = tmp_path_factory.mktemp("data_dir")
    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    return data_dir
| 719 |
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: least row length whose fill-count exceeds one million.

    fill_count_functions[n] is the number of ways a row of n units can be
    filled with red blocks of length >= min_block_length separated by at
    least one black unit (including the all-black row).

    :param min_block_length: minimum allowed red-block length m.
    :return: the least n such that F(m, n) > 1_000_000.
    """
    # Rows shorter than the minimum block can only be all-black: one way each.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                # A block of `block_length` starting at `block_start` must be
                # followed by a black unit; the prefix before it is free.
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            # The block may also sit flush against the end of the row.
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            return n
    raise AssertionError("unreachable: itertools.count never terminates")
if __name__ == "__main__":
    print(f"{solution() = }")
| 543 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Scheduler tests for DPMSolverSDEScheduler (skipped unless torchsde is installed).

    NOTE(review): names restored from an obfuscated original in which the base
    class was the undefined `a_`, both class attributes shared the single name
    `SCREAMING_SNAKE_CASE_` (the second assignment clobbering the first), and
    every method was also named `SCREAMING_SNAKE_CASE_`.
    """

    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config dict, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        # Reference values differ per backend (mps / cuda / cpu fallback).
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # Same loop as test_full_loop_no_noise, but timesteps live on the device.
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 387 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import table for the Bloom model package.
# NOTE(review): the obfuscated original assigned the structure dict and both
# optional-backend lists to the same name (`UpperCamelCase__`), clobbering the
# dict; and the final statement bound the _LazyModule to that name instead of
# installing it into sys.modules. Both restored below.
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

# Tokenizer classes are only exported when the `tokenizers` backend is present.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

# Model classes are only exported when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; runtime uses the lazy module.
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 387 | 1 |
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    """Return True if `item` occurs in the sorted list `a_list`.

    Recursive binary search: O(log n) comparisons, though the slice copies
    make each level O(n) in time and space.

    NOTE(review): the original declared both parameters with the same
    obfuscated name (a SyntaxError) while the `__main__` block below already
    called the function as `binary_search` — names restored accordingly.
    """
    if not a_list:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    return binary_search(a_list[midpoint + 1 :], item)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 712 |
from __future__ import annotations
from random import choice
def random_pivot(lst: list[int]) -> int:
    """Return a uniformly random element of `lst`, used as a quickselect pivot."""
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of `lst` (1-indexed) via quickselect.

    Expected O(n) time. Assumes the elements are distinct: values equal to
    the pivot (other than the pivot itself) are dropped by the partition.

    NOTE(review): the original defined both helpers under the same name `_a`
    (the second def clobbering the first) and declared duplicate parameter
    names (a SyntaxError); names restored from the body's own call sites.
    """
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 585 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.