code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__magic_name__ ={
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ =[
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
__magic_name__ =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 415 | from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def __UpperCamelCase ( A ):
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
__magic_name__ ='''
transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
'''
class _A ( __UpperCamelCase ):
@staticmethod
def _a (SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=SCREAMING_SNAKE_CASE_ , required=SCREAMING_SNAKE_CASE_ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=SCREAMING_SNAKE_CASE_ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F"Loading model {model_type}" )
UpperCamelCase__ = model_type
UpperCamelCase__ = tf_checkpoint
UpperCamelCase__ = pytorch_dump_output
UpperCamelCase__ = config
UpperCamelCase__ = finetuning_task_name
def _a (self ) -> Tuple:
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
if "ckpt" in self._tf_checkpoint.lower():
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = ''''''
else:
UpperCamelCase__ = self._tf_checkpoint
UpperCamelCase__ = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
SCREAMING_SNAKE_CASE_ , self._config , self._pytorch_dump_output , SCREAMING_SNAKE_CASE_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(SCREAMING_SNAKE_CASE_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
'''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
| 415 | 1 |
class _a: # Public class to implement a graph
def __init__( self , __snake_case , __snake_case , __snake_case ) -> Optional[int]:
'''simple docstring'''
_snake_case : Optional[Any] = row
_snake_case : List[Any] = col
_snake_case : Tuple = graph
def lowercase ( self , __snake_case , __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def lowercase ( self , __snake_case , __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
_snake_case : Tuple = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
_snake_case : str = [-1, 0, 1, -1, 1, -1, 0, 1]
_snake_case : int = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase_ )
def lowercase ( self ) -> Tuple: # And finally, count all islands.
'''simple docstring'''
_snake_case : Optional[int] = [[False for j in range(self.COL )] for i in range(self.ROW )]
_snake_case : Tuple = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += 1
return count
| 714 |
import datasets
from .evaluate import evaluate
__lowerCAmelCase :Any = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
__lowerCAmelCase :Union[str, Any] = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
__lowerCAmelCase :Optional[Any] = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _a( datasets.Metric ):
def lowercase ( self ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
def lowercase ( self , __snake_case , __snake_case ) -> Union[str, Any]:
'''simple docstring'''
_snake_case : str = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
_snake_case : Union[str, Any] = [
{
"paragraphs": [
{
"qas": [
{
"answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
"id": ref["id"],
}
for ref in references
]
}
]
}
]
_snake_case : Union[str, Any] = evaluate(dataset=__snake_case , predictions=__snake_case )
return score | 278 | 0 |
'''simple docstring'''
def __snake_case ( SCREAMING_SNAKE_CASE_ : List[str] ) -> int:
"""simple docstring"""
UpperCAmelCase = 0
UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ )
for i in range(n - 1 ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
def __snake_case ( SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
"""simple docstring"""
if len(SCREAMING_SNAKE_CASE_ ) <= 1:
return arr, 0
UpperCAmelCase = len(SCREAMING_SNAKE_CASE_ ) // 2
UpperCAmelCase = arr[0:mid]
UpperCAmelCase = arr[mid:]
UpperCAmelCase, UpperCAmelCase = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = _count_cross_inversions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def __snake_case ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = UpperCAmelCase = UpperCAmelCase = 0
while i < len(SCREAMING_SNAKE_CASE_ ) and j < len(SCREAMING_SNAKE_CASE_ ):
if p[i] > q[j]:
# if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(SCREAMING_SNAKE_CASE_ ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(SCREAMING_SNAKE_CASE_ ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def __snake_case ( ) -> Dict:
"""simple docstring"""
UpperCAmelCase = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
UpperCAmelCase = count_inversions_bf(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , SCREAMING_SNAKE_CASE_ )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
UpperCAmelCase = count_inversions_bf(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , SCREAMING_SNAKE_CASE_ )
# an empty list should also have zero inversions
UpperCAmelCase = []
UpperCAmelCase = count_inversions_bf(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase, UpperCAmelCase = count_inversions_recursive(SCREAMING_SNAKE_CASE_ )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 51 | '''simple docstring'''
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(A_ , A_ ) ) )
def lowerCamelCase__ ( A_ ):
if point:
if isinstance(A_ , A_ ):
for item in point:
if not isinstance(A_ , (int, float) ):
UpperCAmelCase_ = (
"Expected a list of numbers as input, found "
F"""{type(A_ ).__name__}"""
)
raise TypeError(A_ )
else:
UpperCAmelCase_ = F"""Expected a list of numbers as input, found {type(A_ ).__name__}"""
raise TypeError(A_ )
else:
raise ValueError("Missing an input" )
def lowerCamelCase__ ( A_ , A_ ):
_validate_point(A_ )
_validate_point(A_ )
if len(A_ ) != len(A_ ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(A_ , A_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 660 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _A ( __lowercase , __lowercase , __lowercase ):
"""simple docstring"""
if gpta_config_file == "":
lowerCamelCase__ = GPTaConfig()
else:
lowerCamelCase__ = GPTaConfig.from_json_file(__lowercase )
lowerCamelCase__ = GPTaModel(__lowercase )
# Load weights from numpy
load_tf_weights_in_gpta(__lowercase , __lowercase , __lowercase )
# Save pytorch-model
lowerCamelCase__ = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
lowerCamelCase__ = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , __lowercase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__lowercase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
__magic_name__ = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 258 |
"""simple docstring"""
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 258 | 1 |
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( snake_case : int )-> int:
if not isinstance(snake_case , snake_case ):
raise TypeError('Input value must be an \'int\' type' )
_lowerCamelCase = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 650 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Union[str, Any] ={"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] =["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str =["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Any =[
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple =[
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
A_ : Dict =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 650 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
UpperCamelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 387 | import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _a :
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=2 , __UpperCAmelCase=32 , __UpperCAmelCase=16 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=4 , __UpperCAmelCase=[0, 1, 2, 3] , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=[1, 384, 24, 24] , __UpperCAmelCase=True , __UpperCAmelCase=None , ):
__A : Dict = parent
__A : Union[str, Any] = batch_size
__A : str = image_size
__A : Optional[Any] = patch_size
__A : str = num_channels
__A : str = is_training
__A : Optional[Any] = use_labels
__A : Union[str, Any] = hidden_size
__A : int = num_hidden_layers
__A : List[Any] = backbone_out_indices
__A : Dict = num_attention_heads
__A : Dict = intermediate_size
__A : Tuple = hidden_act
__A : List[str] = hidden_dropout_prob
__A : int = attention_probs_dropout_prob
__A : int = initializer_range
__A : List[str] = num_labels
__A : str = backbone_featmap_shape
__A : int = scope
__A : Dict = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
__A : Optional[Any] = (image_size // patch_size) ** 2
__A : List[Any] = num_patches + 1
def __UpperCAmelCase( self ):
__A : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A : Tuple = None
if self.use_labels:
__A : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__A : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase( self ):
__A : Any = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [96, 192, 384, 768],
"num_groups": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__A : Tuple = DPTModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__A : int = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__A : Dict = self.num_labels
__A : Optional[int] = DPTForDepthEstimation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__A : Dict = model(__UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __UpperCAmelCase( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
__A : int = self.num_labels
__A : List[Any] = DPTForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
__A : Union[str, Any] = model(__UpperCAmelCase , labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __UpperCAmelCase( self ):
__A : int = self.prepare_config_and_inputs()
__A , __A , __A : Dict = config_and_inputs
__A : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class _a ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase_ : Tuple = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase_ : Dict = (
{
"""depth-estimation""": DPTForDepthEstimation,
"""feature-extraction""": DPTModel,
"""image-segmentation""": DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase_ : Tuple = False
lowerCamelCase_ : Union[str, Any] = False
lowerCamelCase_ : Optional[Any] = False
def __UpperCAmelCase( self ):
__A : str = DPTModelTester(self )
__A : Dict = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 )
def __UpperCAmelCase( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def __UpperCAmelCase( self ):
pass
def __UpperCAmelCase( self ):
__A , __A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__A : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def __UpperCAmelCase( self ):
__A , __A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(__UpperCAmelCase )
__A : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : Any = [*signature.parameters.keys()]
__A : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCAmelCase )
def __UpperCAmelCase( self ):
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase )
def __UpperCAmelCase( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : int = True
if model_class in get_values(__UpperCAmelCase ):
continue
__A : Dict = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
__A : str = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__A : Optional[Any] = model(**__UpperCAmelCase ).loss
loss.backward()
def __UpperCAmelCase( self ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
__A , __A : str = self.model_tester.prepare_config_and_inputs_for_common()
__A : Any = False
__A : Optional[int] = True
if model_class in get_values(__UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
__A : Tuple = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
__A : Any = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
__A : List[Any] = model(**__UpperCAmelCase ).loss
loss.backward()
def __UpperCAmelCase( self ):
__A , __A : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__A : str = _config_zero_init(__UpperCAmelCase )
for model_class in self.all_model_classes:
__A : List[Any] = model_class(config=__UpperCAmelCase )
# Skip the check for the backbone
__A : List[str] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
__A : Any = [F"{name}.{key}" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCAmelCase( self ):
pass
@slow
def __UpperCAmelCase( self ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
__A : Tuple = DPTModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __UpperCAmelCase( self ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
__A , __A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__A : Dict = "add"
with self.assertRaises(__UpperCAmelCase ):
__A : int = DPTForDepthEstimation(__UpperCAmelCase )
def lowerCamelCase_ ( ) -> Optional[Any]:
__A : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
@slow
class _a ( unittest.TestCase ):
    """Slow integration test: run DPT depth estimation end-to-end on the
    `Intel/dpt-hybrid-midas` checkpoint and compare against reference values.

    NOTE(review): local assignments target the mangled name `__A` while later
    lines read `image_processor`, `model`, `outputs`, `predicted_depth`, etc.;
    as written the test raises NameError.  Intended pipeline documented inline.
    """
    def __UpperCAmelCase( self ):
        """End-to-end depth estimation: image -> processor -> model -> depth."""
        # Load processor + model, move model to the test device.
        __A : List[str] = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas" )
        __A : List[Any] = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas" ).to(__UpperCAmelCase )
        __A : Dict = prepare_img()
        __A : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors="pt" ).to(__UpperCAmelCase )
        # forward pass
        with torch.no_grad():
            __A : List[str] = model(**__UpperCAmelCase )
        __A : str = outputs.predicted_depth
        # verify the predicted depth
        __A : Optional[int] = torch.Size((1, 384, 384) )
        self.assertEqual(predicted_depth.shape , __UpperCAmelCase )
        __A : Any = torch.tensor(
            [[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(__UpperCAmelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 , __UpperCAmelCase , atol=1e-4 ) )
| 387 | 1 |
'''RoBERTa lazy import structure.

Only the configuration and slow tokenizer are registered eagerly; the fast
tokenizer and the framework-specific modeling modules (PyTorch / TensorFlow /
Flax) are registered with a ``_LazyModule`` and imported on first attribute
access.
'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Submodule name -> public names it exports.  The original file bound this
# dict (and every optional extension below) to throwaway names, so the
# `_import_structure` handed to `_LazyModule` at the bottom was never
# defined and importing the package raised NameError; build it up properly.
_import_structure = {
    "configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
    "tokenization_roberta": ["RobertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta"] = [
        "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaForCausalLM",
        "RobertaForMaskedLM",
        "RobertaForMultipleChoice",
        "RobertaForQuestionAnswering",
        "RobertaForSequenceClassification",
        "RobertaForTokenClassification",
        "RobertaModel",
        "RobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta"] = [
        "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaForCausalLM",
        "TFRobertaForMaskedLM",
        "TFRobertaForMultipleChoice",
        "TFRobertaForQuestionAnswering",
        "TFRobertaForSequenceClassification",
        "TFRobertaForTokenClassification",
        "TFRobertaMainLayer",
        "TFRobertaModel",
        "TFRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta"] = [
        "FlaxRobertaForCausalLM",
        "FlaxRobertaForMaskedLM",
        "FlaxRobertaForMultipleChoice",
        "FlaxRobertaForQuestionAnswering",
        "FlaxRobertaForSequenceClassification",
        "FlaxRobertaForTokenClassification",
        "FlaxRobertaModel",
        "FlaxRobertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
    from .tokenization_roberta import RobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_roberta_fast import RobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta import (
            ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaForCausalLM,
            RobertaForMaskedLM,
            RobertaForMultipleChoice,
            RobertaForQuestionAnswering,
            RobertaForSequenceClassification,
            RobertaForTokenClassification,
            RobertaModel,
            RobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta import (
            TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaForCausalLM,
            TFRobertaForMaskedLM,
            TFRobertaForMultipleChoice,
            TFRobertaForQuestionAnswering,
            TFRobertaForSequenceClassification,
            TFRobertaForTokenClassification,
            TFRobertaMainLayer,
            TFRobertaModel,
            TFRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta import (
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaModel,
            FlaxRobertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Base of the positional rolling hash: number of distinct byte values.
# (Previously both constants were bound to one mangled name, leaving the
# `alphabet_size`/`modulus` globals read by the search function undefined.)
alphabet_size = 256
# Modulus to hash a string; a large prime keeps hash collisions unlikely.
modulus = 1_000_003
def __a ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
SCREAMING_SNAKE_CASE : Tuple = len(__lowerCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = len(__lowerCAmelCase )
if p_len > t_len:
return False
SCREAMING_SNAKE_CASE : Dict = 0
SCREAMING_SNAKE_CASE : Tuple = 0
SCREAMING_SNAKE_CASE : Optional[Any] = 1
# Calculating the hash of pattern and substring of text
for i in range(__lowerCAmelCase ):
SCREAMING_SNAKE_CASE : Tuple = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
SCREAMING_SNAKE_CASE : Union[str, Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
SCREAMING_SNAKE_CASE : Optional[Any] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
SCREAMING_SNAKE_CASE : int = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def __a ( ) -> None:
    """Run the Rabin-Karp self-test vectors; raises AssertionError on failure.

    The original body assigned every fixture to a mangled local and then
    called `rabin_karp` with undefined names; the fixtures themselves are
    preserved verbatim below.
    """
    # Test 1) occurrence vs. non-occurrence of the same pattern.
    pattern = 'abc1abc12'
    text_match = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
    text_miss = 'alskfjaldsk23adsfabcabc'
    assert rabin_karp(pattern, text_match) and not rabin_karp(pattern, text_miss)
    # Test 2)
    pattern = 'ABABX'
    text = 'ABABZABABYABABX'
    assert rabin_karp(pattern, text)
    # Test 3)
    pattern = 'AAAB'
    text = 'ABAAAAAB'
    assert rabin_karp(pattern, text)
    # Test 4)
    pattern = 'abcdabcy'
    text = 'abcxabcdabxabcdabcdabcy'
    assert rabin_karp(pattern, text)
    # Test 5) non-ASCII input.
    pattern = 'Lü'
    text = 'Lüsai'
    assert rabin_karp(pattern, text)
    pattern = 'Lue'
    assert not rabin_karp(pattern, text)
    print('Success.' )


# Name expected by the `__main__` guard below (previously undefined).
test_rabin_karp = __a
# Run the self-test when executed as a script.
# NOTE(review): `test_rabin_karp` is not bound under that name in the
# original module (the test function is bound to a mangled name); confirm.
if __name__ == "__main__":
    test_rabin_karp()
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module logger.  The tokenizer class below logs via the name `logger`,
# which the original mangled binding (`_snake_case`) left undefined.
logger = logging.get_logger(__name__)
# On-disk vocabulary file names for the byte-level BPE tokenizer.  These
# three constants were previously all bound to one mangled name
# (`_snake_case`, repeatedly overwritten), leaving the names the class
# attributes below actually read undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

# Maximum model input sizes, keyed by checkpoint name.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __lowerCamelCase ( ) -> Tuple:
UpperCamelCase = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
UpperCamelCase = bs[:]
UpperCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(a_ )
cs.append(2**8 + n )
n += 1
UpperCamelCase = [chr(a_ ) for n in cs]
return dict(zip(a_ , a_ ) )
def __lowerCamelCase ( _lowercase ) -> str:
UpperCamelCase = set()
UpperCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCamelCase = char
return pairs
class _lowerCAmelCase ( _UpperCAmelCase ):
    """Byte-level BPE tokenizer for LED (same scheme as BART/GPT-2).

    NOTE(review): this file has been through an identifier-mangling pass —
    class attributes and locals are bound to placeholder names
    (`SCREAMING_SNAKE_CASE_`, `UpperCamelCase`), several method signatures
    repeat the same parameter name (a SyntaxError in Python), and bodies
    read names (`lowerCamelCase_`, `bytes_to_unicode`, `get_pairs`,
    `logger`, `word`, ...) that are not bound under those spellings here.
    The per-method docs describe the evident intent; confirm against the
    upstream `transformers` LED tokenizer before relying on details.
    """
    # Expected vocabulary file names / download map / max model input sizes.
    SCREAMING_SNAKE_CASE_ : str =VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE_ : str =PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE_ : Dict =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    # Inputs this tokenizer produces for the model.
    SCREAMING_SNAKE_CASE_ : int =["input_ids", "attention_mask"]
    def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int="replace" , SCREAMING_SNAKE_CASE__ : Tuple="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : List[str]="</s>" , SCREAMING_SNAKE_CASE__ : Tuple="<s>" , SCREAMING_SNAKE_CASE__ : List[str]="<unk>" , SCREAMING_SNAKE_CASE__ : Dict="<pad>" , SCREAMING_SNAKE_CASE__ : Any="<mask>" , SCREAMING_SNAKE_CASE__ : int=False , **SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
        """Wrap the special tokens as `AddedToken`s, load the vocab and BPE
        merge table from disk, and build the byte<->unicode maps, BPE rank
        map, cache and pre-tokenization regex."""
        UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else bos_token
        UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else eos_token
        UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else sep_token
        UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else cls_token
        UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else unk_token
        UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        UpperCamelCase = AddedToken(lowerCamelCase_ , lstrip=lowerCamelCase_ , rstrip=lowerCamelCase_ ) if isinstance(lowerCamelCase_ , lowerCamelCase_ ) else mask_token
        super().__init__(
            errors=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , unk_token=lowerCamelCase_ , sep_token=lowerCamelCase_ , cls_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , mask_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , **lowerCamelCase_ , )
        with open(lowerCamelCase_ , encoding='utf-8' ) as vocab_handle:
            UpperCamelCase = json.load(lowerCamelCase_ )
        UpperCamelCase = {v: k for k, v in self.encoder.items()}
        UpperCamelCase = errors  # how to handle errors in decoding
        UpperCamelCase = bytes_to_unicode()
        UpperCamelCase = {v: k for k, v in self.byte_encoder.items()}
        with open(lowerCamelCase_ , encoding='utf-8' ) as merges_handle:
            UpperCamelCase = merges_handle.read().split('\n' )[1:-1]
        UpperCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
        UpperCamelCase = dict(zip(lowerCamelCase_ , range(len(lowerCamelCase_ ) ) ) )
        UpperCamelCase = {}
        UpperCamelCase = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        UpperCamelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def __lowerCAmelCase ( self : Optional[Any] ):
        """Size of the base vocabulary (excluding tokens added after load)."""
        return len(self.encoder )
    def __lowerCAmelCase ( self : Union[str, Any] ):
        """Return the full vocabulary (base + added tokens) as a dict."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def __lowerCAmelCase ( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
        """Apply byte-pair merges to one pre-tokenized token, with memoisation:
        repeatedly merge the lowest-ranked adjacent pair until no ranked pair
        remains, then join the symbols with spaces."""
        if token in self.cache:
            return self.cache[token]
        UpperCamelCase = tuple(lowerCamelCase_ )
        UpperCamelCase = get_pairs(lowerCamelCase_ )
        if not pairs:
            return token
        while True:
            # Lowest-ranked (most frequent in training) pair merges first.
            UpperCamelCase = min(lowerCamelCase_ , key=lambda SCREAMING_SNAKE_CASE__ : self.bpe_ranks.get(lowerCamelCase_ , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCamelCase = bigram
            UpperCamelCase = []
            UpperCamelCase = 0
            while i < len(lowerCamelCase_ ):
                try:
                    UpperCamelCase = word.index(lowerCamelCase_ , lowerCamelCase_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCamelCase = j
                if word[i] == first and i < len(lowerCamelCase_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCamelCase = tuple(lowerCamelCase_ )
            UpperCamelCase = new_word
            if len(lowerCamelCase_ ) == 1:
                break
            else:
                UpperCamelCase = get_pairs(lowerCamelCase_ )
        UpperCamelCase = ''' '''.join(lowerCamelCase_ )
        UpperCamelCase = word
        return word
    def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ):
        """Split text with the GPT-2 regex, byte-encode each piece to the
        unicode alphabet, then run BPE on each piece."""
        UpperCamelCase = []
        for token in re.findall(self.pat , lowerCamelCase_ ):
            UpperCamelCase = ''''''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase_ ).split(' ' ) )
        return bpe_tokens
    def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
        """Map a token string to its vocabulary id (falls back to unk)."""
        return self.encoder.get(lowerCamelCase_ , self.encoder.get(self.unk_token ) )
    def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
        """Map a vocabulary id back to its token string."""
        return self.decoder.get(lowerCamelCase_ )
    def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : Dict ):
        """Join BPE tokens and decode the byte-level encoding back to text."""
        UpperCamelCase = ''''''.join(lowerCamelCase_ )
        UpperCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[str] = None ):
        """Write vocab.json and merges.txt into `save_directory` (optionally
        prefixed) and return both paths; logs an error if the target is not
        a directory, and warns if BPE merge indices are non-consecutive."""
        if not os.path.isdir(lowerCamelCase_ ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        UpperCamelCase = os.path.join(
            lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        UpperCamelCase = os.path.join(
            lowerCamelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase_ , ensure_ascii=lowerCamelCase_ ) + '\n' )
        UpperCamelCase = 0
        with open(lowerCamelCase_ , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    UpperCamelCase = token_index
                writer.write(' '.join(lowerCamelCase_ ) + '\n' )
                index += 1
        return vocab_file, merge_file
    def __lowerCAmelCase ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
        """BART-style special tokens: `<s> A </s>` for one sequence, or
        `<s> A </s></s> B </s>` for a pair."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        UpperCamelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def __lowerCAmelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None , SCREAMING_SNAKE_CASE__ : bool = False ):
        """Return a mask with 1 at special-token positions and 0 elsewhere;
        delegates to the base class when the input already has specials."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=lowerCamelCase_ , token_ids_a=lowerCamelCase_ , already_has_special_tokens=lowerCamelCase_ )
        if token_ids_a is None:
            return [1] + ([0] * len(lowerCamelCase_ )) + [1]
        return [1] + ([0] * len(lowerCamelCase_ )) + [1, 1] + ([0] * len(lowerCamelCase_ )) + [1]
    def __lowerCAmelCase ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[int] , SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ):
        """All-zero token type ids (this model does not use token types)."""
        UpperCamelCase = [self.sep_token_id]
        UpperCamelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any]=False , **SCREAMING_SNAKE_CASE__ : List[Any] ):
        """Optionally prepend a space so the first word is BPE-encoded the
        same way as a mid-sentence word."""
        UpperCamelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase_ ) > 0 and not text[0].isspace()):
            UpperCamelCase = ''' ''' + text
        return (text, kwargs)
    def __lowerCAmelCase ( self : int , SCREAMING_SNAKE_CASE__ : Union[Dict[str, EncodedInput], BatchEncoding] , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[bool] = None , ):
        """Pad as the base class does, then extend `global_attention_mask`
        with -1 entries so it matches the padded sequence length."""
        UpperCamelCase = super()._pad(
            encoded_inputs=lowerCamelCase_ , max_length=lowerCamelCase_ , padding_strategy=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , )
        # Load from model defaults
        if return_attention_mask is None:
            UpperCamelCase = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            UpperCamelCase = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            UpperCamelCase = len(encoded_inputs['global_attention_mask'] ) != len(lowerCamelCase_ )
            if needs_to_be_padded:
                UpperCamelCase = len(lowerCamelCase_ ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    UpperCamelCase = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    UpperCamelCase = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
| 718 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
# Module logger (bound to a mangled name by the identifier-mangling pass;
# not referenced elsewhere in this chunk).
_snake_case = logging.get_logger(__name__)
class _lowerCAmelCase :
    """Composite RAG tokenizer: pairs a question-encoder tokenizer with a
    generator tokenizer and delegates calls to whichever is "current".

    NOTE(review): identifier-mangled — assignments bind `UpperCamelCase`
    while later code reads `self.question_encoder` / `self.generator` /
    `self.current_tokenizer`, and several signatures repeat the parameter
    name `SCREAMING_SNAKE_CASE__` (a SyntaxError in Python).  The docs below
    describe the evident intent; confirm against the upstream
    `transformers` RAG tokenizer.
    """
    def __init__( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
        """Store both sub-tokenizers; the question encoder starts active."""
        UpperCamelCase = question_encoder
        UpperCamelCase = generator
        UpperCamelCase = self.question_encoder
    def __lowerCAmelCase ( self : str , SCREAMING_SNAKE_CASE__ : Dict ):
        """Save both sub-tokenizers into dedicated subfolders of the target
        directory (refusing a path that is an existing file)."""
        if os.path.isfile(SCREAMING_SNAKE_CASE__ ):
            raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
        os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
        UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE__ , 'question_encoder_tokenizer' )
        UpperCamelCase = os.path.join(SCREAMING_SNAKE_CASE__ , 'generator_tokenizer' )
        self.question_encoder.save_pretrained(SCREAMING_SNAKE_CASE__ )
        self.generator.save_pretrained(SCREAMING_SNAKE_CASE__ )
    @classmethod
    def __lowerCAmelCase ( cls : Any , SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ):
        """Load both sub-tokenizers from their subfolders, resolving the RAG
        config first when one is not supplied."""
        from ..auto.tokenization_auto import AutoTokenizer
        UpperCamelCase = kwargs.pop('config' , SCREAMING_SNAKE_CASE__ )
        if config is None:
            UpperCamelCase = RagConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
        UpperCamelCase = AutoTokenizer.from_pretrained(
            SCREAMING_SNAKE_CASE__ , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
        UpperCamelCase = AutoTokenizer.from_pretrained(
            SCREAMING_SNAKE_CASE__ , config=config.generator , subfolder='generator_tokenizer' )
        return cls(question_encoder=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ )
    def __call__( self : Tuple , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : List[Any] ):
        """Delegate encoding to the currently active sub-tokenizer."""
        return self.current_tokenizer(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : int , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[str] ):
        """Delegate batch decoding to the generator tokenizer."""
        return self.generator.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : Any , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
        """Delegate decoding to the generator tokenizer."""
        return self.generator.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
    def __lowerCAmelCase ( self : int ):
        """Make the question-encoder tokenizer active (used for inputs)."""
        UpperCamelCase = self.question_encoder
    def __lowerCAmelCase ( self : str ):
        """Make the generator tokenizer active (used for targets)."""
        UpperCamelCase = self.generator
    def __lowerCAmelCase ( self : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[List[str]] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : Optional[int] = None , SCREAMING_SNAKE_CASE__ : str = "longest" , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ):
        """Deprecated seq2seq helper: tokenize source texts (and optional
        target texts, appended as `labels`) in one call; emits a
        deprecation warning and is kept only for backwards compatibility."""
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details' , SCREAMING_SNAKE_CASE__ , )
        if max_length is None:
            UpperCamelCase = self.current_tokenizer.model_max_length
        UpperCamelCase = self(
            SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            UpperCamelCase = self.current_tokenizer.model_max_length
        UpperCamelCase = self(
            text_target=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
        UpperCamelCase = labels['input_ids']
        return model_inputs
| 170 | 0 |
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class lowerCAmelCase ( lowerCamelCase__ ):
    """Audio diffusion pipeline: denoises mel-spectrogram images with a UNet
    (optionally through a VQ-VAE latent space) and converts them to audio.

    NOTE(review): identifier-mangled — method signatures repeat the parameter
    name `_A` (a SyntaxError in Python), locals are bound to `__a`, and
    bodies read names (`steps`, `noise`, `images`, `mask`, `sample`, ...)
    never bound under those spellings.  `isinstance(..., _A)` checks look
    like they originally named scheduler/UNet classes.  Docs describe the
    evident intent; confirm against the upstream diffusers pipeline.
    """
    # Presumably the optional pipeline components — TODO confirm upstream.
    _A = ['vqvae']
    def __init__( self , _A , _A , _A , _A , ) -> Tuple:
        """Register the UNet, scheduler, mel converter and (optional) VQ-VAE."""
        super().__init__()
        self.register_modules(unet=_A , scheduler=_A , mel=_A , vqvae=_A )
    def __magic_name__ ( self ) -> int:
        """Default number of inference steps: 50 for one scheduler family,
        1000 otherwise (the scheduler class checked here was mangled away)."""
        return 50 if isinstance(self.scheduler , _A ) else 1000
    @torch.no_grad()
    def __call__( self , _A = 1 , _A = None , _A = None , _A = 0 , _A = 0 , _A = None , _A = None , _A = 0 , _A = 0 , _A = None , _A = 0 , _A = None , _A = None , _A=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Generate audio (and its spectrogram image) by iterative denoising,
        optionally conditioned on an input audio slice via noising from
        `start_step` and/or start/end-seconds masking.
        """
        __a : Union[str, Any] = steps or self.get_default_steps()
        self.scheduler.set_timesteps(_A )
        __a : Tuple = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            __a : Optional[Any] = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            __a : Optional[int] = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=_A , device=self.device , )
        __a : Union[str, Any] = noise
        __a : List[str] = None
        if audio_file is not None or raw_audio is not None:
            # Convert the conditioning audio to a spectrogram image tensor.
            self.mel.load_audio(_A , _A )
            __a : Dict = self.mel.audio_slice_to_image(_A )
            __a : str = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
                (input_image.height, input_image.width) )
            __a : str = (input_image / 255) * 2 - 1
            __a : Dict = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                __a : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_A , 0 ) ).latent_dist.sample(
                    generator=_A )[0]
                __a : Optional[Any] = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                __a : Union[str, Any] = self.scheduler.add_noise(_A , _A , self.scheduler.timesteps[start_step - 1] )
            # Convert mask start/end seconds to pixel columns.
            __a : Tuple = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            __a : Optional[int] = int(mask_start_secs * pixels_per_second )
            __a : Tuple = int(mask_end_secs * pixels_per_second )
            __a : int = self.scheduler.add_noise(_A , _A , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , _A ):
                __a : Union[str, Any] = self.unet(_A , _A , _A )['sample']
            else:
                __a : Dict = self.unet(_A , _A )['sample']
            if isinstance(self.scheduler , _A ):
                __a : Optional[Any] = self.scheduler.step(
                    model_output=_A , timestep=_A , sample=_A , eta=_A , generator=_A , )['prev_sample']
            else:
                __a : Optional[int] = self.scheduler.step(
                    model_output=_A , timestep=_A , sample=_A , generator=_A , )['prev_sample']
            if mask is not None:
                # Re-impose the conditioning audio inside the masked columns.
                if mask_start > 0:
                    __a : int = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    __a : List[str] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            __a : Dict = 1 / self.vqvae.config.scaling_factor * images
            __a : Dict = self.vqvae.decode(_A )['sample']
        # Denormalise to uint8 images, then synthesise audio from each.
        __a : Optional[Any] = (images / 2 + 0.5).clamp(0 , 1 )
        __a : List[str] = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        __a : Optional[int] = (images * 255).round().astype('uint8' )
        __a : Dict = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_A , mode='RGB' ).convert('L' ) for _ in images) )
        __a : Tuple = [self.mel.image_to_audio(_A ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(_A )[:, np.newaxis, :] ) , **ImagePipelineOutput(_A ) )
    @torch.no_grad()
    def __magic_name__ ( self , _A , _A = 50 ) -> np.ndarray:
        """Reverse DDIM sampling: map spectrogram images back to the initial
        noise that would generate them (useful for interpolation)."""
        assert isinstance(self.scheduler , _A )
        self.scheduler.set_timesteps(_A )
        __a : int = np.array(
            [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
        __a : int = (sample / 255) * 2 - 1
        __a : str = torch.Tensor(_A ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            __a : str = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            __a : List[str] = self.scheduler.alphas_cumprod[t]
            __a : Optional[Any] = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            __a : int = 1 - alpha_prod_t
            __a : Union[str, Any] = self.unet(_A , _A )['sample']
            __a : List[Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            __a : List[str] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            __a : List[str] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def __magic_name__ ( _A , _A , _A ) -> torch.Tensor:
        """Spherical linear interpolation between two (flattened) tensors."""
        __a : Dict = acos(torch.dot(torch.flatten(_A ) , torch.flatten(_A ) ) / torch.norm(_A ) / torch.norm(_A ) )
        return sin((1 - alpha) * theta ) * xa / sin(_A ) + sin(alpha * theta ) * xa / sin(_A )
| 597 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
# Key/value type variables; the generic classes below reference them as
# `KT` and `VT`.  (Previously both were bound to one mangled name, which
# also got overwritten, so `Generic[KT, VT]` raised NameError.)
KT = TypeVar("KT")
VT = TypeVar("VT")
class lowerCAmelCase ( Generic[KT, VT] ):
    """A skip-list node: a key/value pair plus per-level forward pointers.

    The original signature repeated one mangled parameter name (a
    SyntaxError) and the body read unbound names; the key/value/forward
    layout restored here is the one the skip list below actually reads.
    """

    def __init__( self , key: KT = "root" , value: VT | None = None ) -> None:
        self.key = key
        self.value = value
        # One forward pointer per level this node participates in.
        self.forward: list[Node[KT, VT]] = []

    def __repr__( self ) -> str:
        return f'''Node({self.key}: {self.value})'''

    @property
    def level( self ) -> int:
        """Number of levels this node participates in (len of `forward`).

        The skip list accesses this as `node.level`; it was previously bound
        to a mangled property name.
        """
        return len(self.forward )


# The skip-list implementation below refers to this class as `Node`.
Node = lowerCAmelCase
class lowerCAmelCase ( Generic[KT, VT] ):
    """Probabilistic skip list mapping totally-ordered keys to values.

    Each node participates in a geometrically-distributed number of levels
    (parameter `p`, capped at `max_level`), giving expected O(log n) search,
    insert and delete.  The original methods all collided on one mangled
    name and repeated parameter names (a SyntaxError); the method names
    restored here are the ones the class body and the self-tests below
    actually call (`random_level`, `_locate_node`, `insert`, `delete`,
    `find`).
    """

    def __init__( self , p: float = 0.5 , max_level: int = 16 ) -> None:
        # Sentinel head node; it carries no real key/value.
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__( self ) -> str:
        """ASCII sketch of the list, for debugging."""
        items = list(self )
        if len(items ) == 0:
            return f'''SkipList(level={self.level})'''
        label_size = max((len(str(item ) ) for item in items) , default=4 )
        label_size = max(label_size , 4 ) + 4
        node = self.head
        lines = []
        forwards = node.forward.copy()
        lines.append(f'''[{node.key}]'''.ljust(label_size , '-' ) + '* ' * len(forwards ) )
        lines.append(' ' * label_size + '| ' * len(forwards ) )
        while len(node.forward ) != 0:
            node = node.forward[0]
            lines.append(
                f'''[{node.key}]'''.ljust(label_size , '-' )
                + ' '.join(str(n.key ) if n.key == node.key else '|' for n in forwards ) )
            lines.append(' ' * label_size + '| ' * len(forwards ) )
            forwards = node.forward
        lines.append('None'.ljust(label_size ) + '* ' * len(forwards ) )
        return f'''SkipList(level={self.level})\n''' + "\n".join(lines )

    def __iter__( self ):
        """Yield keys in ascending order by walking level 0."""
        node = self.head
        while len(node.forward ) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level( self ) -> int:
        """Draw a node height: geometric with parameter p, capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node( self , key ) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """Return (node-with-key or None, per-level predecessors of key)."""
        update_vector = []
        node = self.head
        for i in reversed(range(self.level ) ):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node )
        update_vector.reverse()  # Note that we were inserting values in reverse order.
        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward ) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete( self , key ) -> None:
        """Remove `key` (and its value) if present; otherwise do nothing."""
        node, update_vector = self._locate_node(key )
        if node is not None:
            for i, update_node in enumerate(update_vector ):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert( self , key , value ) -> None:
        """Insert `key` -> `value`, overwriting the value if `key` exists."""
        node, update_vector = self._locate_node(key )
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1 , level ):
                    update_vector.append(self.head )
                self.level = level
            new_node = Node(key , value )
            for i, update_node in enumerate(update_vector[:level] ):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i] )
                if update_node.level < i + 1:
                    update_node.forward.append(new_node )
                else:
                    update_node.forward[i] = new_node

    def find( self , key ) -> VT | None:
        """Return the value stored for `key`, or None when absent."""
        node, _ = self._locate_node(key )
        if node is not None:
            return node.value
        return None


# The self-tests below construct the list via the name `SkipList`.
SkipList = lowerCAmelCase
def lowerCAmelCase__ ( ):
__a : str = SkipList()
skip_list.insert('Key1' , 3 )
skip_list.insert('Key2' , 12 )
skip_list.insert('Key3' , 41 )
skip_list.insert('Key4' , -19 )
__a : Dict = skip_list.head
__a : Optional[Any] = {}
while node.level != 0:
__a : Union[str, Any] = node.forward[0]
__a : Any = node.value
assert len(SCREAMING_SNAKE_CASE__ ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowerCAmelCase__ ( ):
__a : Tuple = SkipList()
skip_list.insert('Key1' , 10 )
skip_list.insert('Key1' , 12 )
skip_list.insert('Key5' , 7 )
skip_list.insert('Key7' , 10 )
skip_list.insert('Key10' , 5 )
skip_list.insert('Key7' , 7 )
skip_list.insert('Key5' , 5 )
skip_list.insert('Key10' , 10 )
__a : Optional[int] = skip_list.head
__a : Optional[int] = {}
while node.level != 0:
__a : List[Any] = node.forward[0]
__a : Dict = node.value
if len(SCREAMING_SNAKE_CASE__ ) != 4:
print()
assert len(SCREAMING_SNAKE_CASE__ ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowerCAmelCase__ ( ):
__a : Optional[int] = SkipList()
assert skip_list.find('Some key' ) is None
def lowerCAmelCase__ ( ):
__a : str = SkipList()
skip_list.insert('Key2' , 20 )
assert skip_list.find('Key2' ) == 20
skip_list.insert('Some Key' , 10 )
skip_list.insert('Key2' , 8 )
skip_list.insert('V' , 13 )
assert skip_list.find('Y' ) is None
assert skip_list.find('Key2' ) == 8
assert skip_list.find('Some Key' ) == 10
assert skip_list.find('V' ) == 13
def lowerCAmelCase__ ( ):
__a : List[str] = SkipList()
skip_list.delete('Some key' )
assert len(skip_list.head.forward ) == 0
def lowerCAmelCase__ ( ):
__a : int = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('Key2' ) is None
def lowerCAmelCase__ ( ):
__a : Tuple = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 14 )
skip_list.insert('Key2' , 15 )
skip_list.delete('V' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) == 14
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('X' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) == 12
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key1' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) == 15
skip_list.delete('Key2' )
assert skip_list.find('V' ) is None
assert skip_list.find('X' ) is None
assert skip_list.find('Key1' ) is None
assert skip_list.find('Key2' ) is None
def lowerCAmelCase__ ( ):
__a : Optional[int] = SkipList()
skip_list.insert('Key1' , 12 )
skip_list.insert('V' , 13 )
skip_list.insert('X' , 142 )
skip_list.insert('Key2' , 15 )
skip_list.delete('X' )
def traverse_keys(SCREAMING_SNAKE_CASE__ ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(SCREAMING_SNAKE_CASE__ )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowerCAmelCase__ ( ):
def is_sorted(SCREAMING_SNAKE_CASE__ ):
return all(next_item >= item for item, next_item in zip(SCREAMING_SNAKE_CASE__ , lst[1:] ) )
__a : Any = SkipList()
for i in range(10 ):
skip_list.insert(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(SCREAMING_SNAKE_CASE__ ) )
def lowerCAmelCase__ ( ):
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowerCAmelCase__ ( ):
__a : Tuple = SkipList()
skip_list.insert(2 , '2' )
skip_list.insert(4 , '4' )
skip_list.insert(6 , '4' )
skip_list.insert(4 , '5' )
skip_list.insert(8 , '4' )
skip_list.insert(9 , '4' )
skip_list.delete(4 )
print(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 597 | 1 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Tuple = WavaVecaForSequenceClassification.from_pretrained(__lowerCamelCase, config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Tuple = downstream_dict["projector.weight"]
_SCREAMING_SNAKE_CASE : int = downstream_dict["projector.bias"]
_SCREAMING_SNAKE_CASE : int = downstream_dict["model.post_net.linear.weight"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = downstream_dict["model.post_net.linear.bias"]
return model
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = WavaVecaForAudioFrameClassification.from_pretrained(__lowerCamelCase, config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = downstream_dict["model.linear.weight"]
_SCREAMING_SNAKE_CASE : List[str] = downstream_dict["model.linear.bias"]
return model
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Any = WavaVecaForXVector.from_pretrained(__lowerCamelCase, config=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = downstream_dict["connector.weight"]
_SCREAMING_SNAKE_CASE : Dict = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_SCREAMING_SNAKE_CASE : Optional[Any] = downstream_dict[
f"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
_SCREAMING_SNAKE_CASE : Union[str, Any] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
_SCREAMING_SNAKE_CASE : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_SCREAMING_SNAKE_CASE : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_SCREAMING_SNAKE_CASE : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_SCREAMING_SNAKE_CASE : Dict = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_SCREAMING_SNAKE_CASE : Dict = downstream_dict["objective.W"]
return model
@torch.no_grad()
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(__lowerCamelCase, map_location="cpu" )
_SCREAMING_SNAKE_CASE : List[str] = checkpoint["Downstream"]
_SCREAMING_SNAKE_CASE : Any = WavaVecaConfig.from_pretrained(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = WavaVecaFeatureExtractor.from_pretrained(
__lowerCamelCase, return_attention_mask=__lowerCamelCase, do_normalize=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Optional[Any] = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
_SCREAMING_SNAKE_CASE : Any = convert_classification(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
elif arch.endswith("ForAudioFrameClassification" ):
_SCREAMING_SNAKE_CASE : List[Any] = convert_diarization(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
elif arch.endswith("ForXVector" ):
_SCREAMING_SNAKE_CASE : str = convert_xvector(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
_SCREAMING_SNAKE_CASE : str = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(__lowerCamelCase )
hf_model.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
UpperCamelCase__ =parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path) | 721 |
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
while a != 0:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = b % a, a
return b
def lowerCamelCase__ (__lowerCamelCase, __lowerCamelCase ):
if gcd(__lowerCamelCase, __lowerCamelCase ) != 1:
_SCREAMING_SNAKE_CASE : int = f"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(__lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = 1, 0, a
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = 0, 1, m
while va != 0:
_SCREAMING_SNAKE_CASE : Union[str, Any] = ua // va
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m | 381 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : torch.FloatTensor
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase ):
@register_to_config
def __init__( self : Optional[Any] , __UpperCamelCase : List[Any] = 3 , __UpperCamelCase : List[Any] = 3 , __UpperCamelCase : int = ("DownEncoderBlock2D",) , __UpperCamelCase : Optional[int] = ("UpDecoderBlock2D",) , __UpperCamelCase : str = (64,) , __UpperCamelCase : Optional[int] = 1 , __UpperCamelCase : List[str] = "silu" , __UpperCamelCase : Optional[int] = 3 , __UpperCamelCase : Dict = 32 , __UpperCamelCase : Dict = 256 , __UpperCamelCase : List[str] = 32 , __UpperCamelCase : str = None , __UpperCamelCase : int = 0.1_8_2_1_5 , __UpperCamelCase : Optional[int] = "group" , ) -> Any:
super().__init__()
# pass init params to Encoder
A = Encoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , down_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , double_z=UpperCamelCase__ , )
A = vq_embed_dim if vq_embed_dim is not None else latent_channels
A = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
A = VectorQuantizer(UpperCamelCase__ , UpperCamelCase__ , beta=0.2_5 , remap=UpperCamelCase__ , sane_index_shape=UpperCamelCase__ )
A = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1 )
# pass init params to Decoder
A = Decoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , up_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , norm_type=UpperCamelCase__ , )
@apply_forward_hook
def __UpperCamelCase ( self : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple = True ) -> VQEncoderOutput:
A = self.encoder(UpperCamelCase__ )
A = self.quant_conv(UpperCamelCase__ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=UpperCamelCase__ )
@apply_forward_hook
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : str = False , __UpperCamelCase : int = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if not force_not_quantize:
A , A , A = self.quantize(UpperCamelCase__ )
else:
A = h
A = self.post_quant_conv(UpperCamelCase__ )
A = self.decoder(UpperCamelCase__ , quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ )
def __UpperCamelCase ( self : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] = True ) -> Union[DecoderOutput, torch.FloatTensor]:
A = sample
A = self.encode(UpperCamelCase__ ).latents
A = self.decode(UpperCamelCase__ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__ ) | 106 |
"""simple docstring"""
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
__lowercase : Optional[int] = logging.getLogger(__name__)
class lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCamelCase__=-1 ) -> str:
'''simple docstring'''
lowerCamelCase_ = label_idx
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[InputExample]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase_ = mode.value
lowerCamelCase_ = os.path.join(UpperCamelCase__ , F"""{mode}.txt""" )
lowerCamelCase_ = 1
lowerCamelCase_ = []
with open(UpperCamelCase__ , encoding='''utf-8''' ) as f:
lowerCamelCase_ = []
lowerCamelCase_ = []
for line in f:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=UpperCamelCase__ , labels=UpperCamelCase__ ) )
guid_index += 1
lowerCamelCase_ = []
lowerCamelCase_ = []
else:
lowerCamelCase_ = line.split(''' ''' )
words.append(splits[0] )
if len(UpperCamelCase__ ) > 1:
labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
else:
# Examples could have no label for mode = "test"
labels.append('''O''' )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=UpperCamelCase__ , labels=UpperCamelCase__ ) )
return examples
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
lowerCamelCase_ = 0
for line in test_input_reader:
if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
writer.write(UpperCamelCase__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCamelCase_ = line.split()[0] + ''' ''' + preds_list[example_id].pop(0 ) + '''\n'''
writer.write(UpperCamelCase__ )
else:
logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if path:
with open(UpperCamelCase__ , '''r''' ) as f:
lowerCamelCase_ = f.read().splitlines()
if "O" not in labels:
lowerCamelCase_ = ['''O'''] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self ) -> Optional[int]:
'''simple docstring'''
super().__init__(label_idx=-2 )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if path:
with open(UpperCamelCase__ , '''r''' ) as f:
lowerCamelCase_ = f.read().splitlines()
if "O" not in labels:
lowerCamelCase_ = ['''O'''] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCAmelCase ( a ):
"""simple docstring"""
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[InputExample]:
'''simple docstring'''
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase_ = mode.value
lowerCamelCase_ = os.path.join(UpperCamelCase__ , F"""{mode}.txt""" )
lowerCamelCase_ = 1
lowerCamelCase_ = []
with open(UpperCamelCase__ , encoding='''utf-8''' ) as f:
for sentence in parse_incr(UpperCamelCase__ ):
lowerCamelCase_ = []
lowerCamelCase_ = []
for token in sentence:
words.append(token['''form'''] )
labels.append(token['''upos'''] )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
if words:
examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=UpperCamelCase__ , labels=UpperCamelCase__ ) )
guid_index += 1
return examples
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> int:
'''simple docstring'''
lowerCamelCase_ = 0
for sentence in parse_incr(UpperCamelCase__ ):
lowerCamelCase_ = preds_list[example_id]
lowerCamelCase_ = ''''''
for token in sentence:
out += F"""{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) """
out += "\n"
writer.write(UpperCamelCase__ )
example_id += 1
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
if path:
with open(UpperCamelCase__ , '''r''' ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
] | 142 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
UpperCamelCase__ = XLMRobertaModel.from_pretrained('xlm-roberta-base' )
UpperCamelCase__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ = torch.tensor(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ )['last_hidden_state'].detach()
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = XLMRobertaModel.from_pretrained('xlm-roberta-large' )
UpperCamelCase__ = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
UpperCamelCase__ = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
UpperCamelCase__ = torch.tensor(
[[-0.0_699, -0.0_318, 0.0_705, -0.1_241, 0.0_999, -0.0_520, 0.1_004, -0.1_838, -0.4_704, 0.1_437, 0.0_821, 0.0_126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
UpperCamelCase__ = model(snake_case_ )['last_hidden_state'].detach()
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , snake_case_ , atol=1E-3 ) )
| 717 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : str= logging.get_logger(__name__)
A__ : List[Any]= {
"""nvidia/segformer-b0-finetuned-ade-512-512""": (
"""https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"""
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class __lowerCamelCase ( _a ):
a : Any ="""segformer"""
def __init__( self , snake_case_=3 , snake_case_=4 , snake_case_=[2, 2, 2, 2] , snake_case_=[8, 4, 2, 1] , snake_case_=[32, 64, 160, 256] , snake_case_=[7, 3, 3, 3] , snake_case_=[4, 2, 2, 2] , snake_case_=[1, 2, 5, 8] , snake_case_=[4, 4, 4, 4] , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.1 , snake_case_=0.02 , snake_case_=0.1 , snake_case_=1E-6 , snake_case_=256 , snake_case_=255 , **snake_case_ , ) -> Tuple:
super().__init__(**snake_case_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'
' removed, as the behaviour will default to that of reshape_last_stage = True.' , snake_case_ , )
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_encoder_blocks
UpperCamelCase__ = depths
UpperCamelCase__ = sr_ratios
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = patch_sizes
UpperCamelCase__ = strides
UpperCamelCase__ = mlp_ratios
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = classifier_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = decoder_hidden_size
UpperCamelCase__ = kwargs.get('reshape_last_stage' , snake_case_ )
UpperCamelCase__ = semantic_loss_ignore_index
class __lowerCamelCase ( _a ):
a : Any =version.parse("""1.11""" )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> float:
return 1E-4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
return 12
| 20 | 0 |
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _SCREAMING_SNAKE_CASE ( image, w, h ):
    """Normalize an image input into a float tensor in the range [-1, 1].

    Accepts a ``torch.Tensor`` (returned unchanged), a single PIL image, or a
    list of PIL images / tensors.  PIL inputs are resized to ``(w, h)`` with
    Lanczos resampling, stacked into an (N, H, W, C) array, rescaled from
    [0, 255] to [-1, 1], and transposed to (N, C, H, W).

    Fixes vs. previous revision: the signature had duplicate parameter names
    (a SyntaxError) and the dtype/scale constants were garbled
    (``np.floataa`` -> ``np.float32``, ``2_55.0`` -> ``255.0``).
    """
    if isinstance(image, torch.Tensor):
        # Caller already supplies a tensor; assume it is preprocessed.
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        # Map [0, 1] -> [-1, 1] as expected by the VAE encoder.
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _SCREAMING_SNAKE_CASE ( v0, v1, t, DOT_THRESHOLD=0.9995 ):
    """Spherical linear interpolation (slerp) between two vectors.

    Accepts numpy arrays or torch tensors; torch inputs are moved to CPU,
    interpolated in numpy, and moved back to their original device.  When the
    vectors are nearly collinear (|dot| > DOT_THRESHOLD), plain linear
    interpolation is used to avoid dividing by a vanishing sine.

    Fixes vs. previous revision: duplicate parameter names (SyntaxError) and
    ``inputs_are_torch`` being read while undefined for ndarray inputs.
    """
    inputs_are_torch = False  # must be initialized: ndarray inputs skip the branch below
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # Nearly parallel: fall back to plain lerp for numerical stability.
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
def _SCREAMING_SNAKE_CASE ( x, y ):
    """Squared spherical (great-circle) distance between L2-normalized rows.

    Both inputs are normalized along the last dimension, so the result depends
    only on the angle between corresponding rows.  Used as the CLIP guidance
    loss.  Fix vs. previous revision: the signature repeated the same
    parameter name for both arguments (SyntaxError).
    """
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    # chord length / 2 -> half-angle; arcsin recovers it; squared and doubled.
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def _SCREAMING_SNAKE_CASE ( model, value ):
    """Set ``requires_grad`` on every parameter of ``model``.

    Fixes vs. previous revision: duplicate parameter names (SyntaxError), and
    the loop body assigned ``value`` to a throwaway local instead of
    ``param.requires_grad``, so the function silently did nothing.
    """
    for param in model.parameters():
        param.requires_grad = value
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : List[str], UpperCamelCase__ : AutoencoderKL, UpperCamelCase__ : CLIPTextModel, UpperCamelCase__ : CLIPModel, UpperCamelCase__ : CLIPTokenizer, UpperCamelCase__ : UNetaDConditionModel, UpperCamelCase__ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler], UpperCamelCase__ : CLIPFeatureExtractor, UpperCamelCase__ : Any=None, UpperCamelCase__ : Optional[Any]=None, UpperCamelCase__ : Tuple=None, ) -> Optional[Any]:
super().__init__()
self.register_modules(
vae=UpperCamelCase__, text_encoder=UpperCamelCase__, clip_model=UpperCamelCase__, tokenizer=UpperCamelCase__, unet=UpperCamelCase__, scheduler=UpperCamelCase__, feature_extractor=UpperCamelCase__, coca_model=UpperCamelCase__, coca_tokenizer=UpperCamelCase__, coca_transform=UpperCamelCase__, )
_A = (
feature_extractor.size
if isinstance(feature_extractor.size, UpperCamelCase__ )
else feature_extractor.size['shortest_edge']
)
_A = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std )
set_requires_grad(self.text_encoder, UpperCamelCase__ )
set_requires_grad(self.clip_model, UpperCamelCase__ )
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : Optional[Union[str, int]] = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_A = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
self.enable_attention_slicing(UpperCamelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
set_requires_grad(self.vae, UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> str:
set_requires_grad(self.vae, UpperCamelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> int:
set_requires_grad(self.unet, UpperCamelCase__ )
def __UpperCAmelCase ( self : List[str] ) -> Any:
set_requires_grad(self.unet, UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : List[str], UpperCamelCase__ : Any ) -> Dict:
# get the original timestep using init_timestep
_A = min(int(num_inference_steps * strength ), UpperCamelCase__ )
_A = max(num_inference_steps - init_timestep, 0 )
_A = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : str, UpperCamelCase__ : Tuple, UpperCamelCase__ : Any, UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any]=None ) -> Any:
if not isinstance(UpperCamelCase__, torch.Tensor ):
raise ValueError(f'`image` has to be of type `torch.Tensor` but is {type(UpperCamelCase__ )}' )
_A = image.to(device=UpperCamelCase__, dtype=UpperCamelCase__ )
if isinstance(UpperCamelCase__, UpperCamelCase__ ):
_A = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(UpperCamelCase__ )
]
_A = torch.cat(UpperCamelCase__, dim=0 )
else:
_A = self.vae.encode(UpperCamelCase__ ).latent_dist.sample(UpperCamelCase__ )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_A = 0.18_215 * init_latents
_A = init_latents.repeat_interleave(UpperCamelCase__, dim=0 )
_A = randn_tensor(init_latents.shape, generator=UpperCamelCase__, device=UpperCamelCase__, dtype=UpperCamelCase__ )
# get latents
_A = self.scheduler.add_noise(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
_A = init_latents
return latents
def __UpperCAmelCase ( self : int, UpperCamelCase__ : Optional[int] ) -> List[str]:
_A = self.coca_transform(UpperCamelCase__ ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_A = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype ) )
_A = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>', '' ).rstrip(' .,' )
def __UpperCAmelCase ( self : Union[str, Any], UpperCamelCase__ : List[Any], UpperCamelCase__ : Optional[Any] ) -> List[str]:
_A = self.feature_extractor.preprocess(UpperCamelCase__ )
_A = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
_A = self.clip_model.get_image_features(UpperCamelCase__ )
_A = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCamelCase__ )
_A = image_embeddings_clip.repeat_interleave(UpperCamelCase__, dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def __UpperCAmelCase ( self : List[Any], UpperCamelCase__ : Any, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Tuple, UpperCamelCase__ : int, UpperCamelCase__ : List[str], UpperCamelCase__ : List[Any], ) -> int:
_A = latents.detach().requires_grad_()
_A = self.scheduler.scale_model_input(UpperCamelCase__, UpperCamelCase__ )
# predict the noise residual
_A = self.unet(UpperCamelCase__, UpperCamelCase__, encoder_hidden_states=UpperCamelCase__ ).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_A = self.scheduler.alphas_cumprod[timestep]
_A = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_A = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_A = torch.sqrt(UpperCamelCase__ )
_A = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, UpperCamelCase__ ):
_A = self.scheduler.sigmas[index]
_A = latents - sigma * noise_pred
else:
raise ValueError(f'scheduler type {type(self.scheduler )} not supported' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_A = 1 / 0.18_215 * sample
_A = self.vae.decode(UpperCamelCase__ ).sample
_A = (image / 2 + 0.5).clamp(0, 1 )
_A = transforms.Resize(self.feature_extractor_size )(UpperCamelCase__ )
_A = self.normalize(UpperCamelCase__ ).to(latents.dtype )
_A = self.clip_model.get_image_features(UpperCamelCase__ )
_A = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=UpperCamelCase__ )
_A = spherical_dist_loss(UpperCamelCase__, UpperCamelCase__ ).mean() * clip_guidance_scale
_A = -torch.autograd.grad(UpperCamelCase__, UpperCamelCase__ )[0]
if isinstance(self.scheduler, UpperCamelCase__ ):
_A = latents.detach() + grads * (sigma**2)
_A = noise_pred_original
else:
_A = noise_pred_original - torch.sqrt(UpperCamelCase__ ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self : Optional[Any], UpperCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image], UpperCamelCase__ : Optional[str] = None, UpperCamelCase__ : Optional[str] = None, UpperCamelCase__ : Optional[int] = 5_12, UpperCamelCase__ : Optional[int] = 5_12, UpperCamelCase__ : float = 0.6, UpperCamelCase__ : Optional[int] = 50, UpperCamelCase__ : Optional[float] = 7.5, UpperCamelCase__ : Optional[int] = 1, UpperCamelCase__ : float = 0.0, UpperCamelCase__ : Optional[float] = 1_00, UpperCamelCase__ : Optional[torch.Generator] = None, UpperCamelCase__ : Optional[str] = "pil", UpperCamelCase__ : bool = True, UpperCamelCase__ : float = 0.8, UpperCamelCase__ : float = 0.1, UpperCamelCase__ : float = 0.1, ) -> Tuple:
    """Run the CLIP-guided style-transfer diffusion loop.

    Takes a content image and a style image, optionally auto-captions them with
    CoCa, interpolates (slerp) their text and CLIP-image embeddings, then runs
    the denoising loop with classifier-free and CLIP guidance and decodes the
    result through the VAE.

    NOTE(review): an automated rename collapsed every parameter into
    ``UpperCamelCase__`` (duplicate argument names are a SyntaxError) and every
    local into ``_A``; the free names used below (``batch_size``, ``height``,
    ``width``, ``generator``, ``content_prompt``, ``latents`` …) are the
    original parameter/local names and are unbound here. The original names
    must be restored before this method can run.
    """
    if isinstance(UpperCamelCase__, UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
        raise ValueError(f'You have passed {batch_size} batch_size, but only {len(UpperCamelCase__ )} generators.' )
    if height % 8 != 0 or width % 8 != 0:
        raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.' )
    if isinstance(UpperCamelCase__, torch.Generator ) and batch_size > 1:
        _A = [generator] + [None] * (batch_size - 1)
    # Record which CoCa components are missing so the error message can list them.
    _A = [
        ('model', self.coca_model is None),
        ('tokenizer', self.coca_tokenizer is None),
        ('transform', self.coca_transform is None),
    ]
    _A = [x[0] for x in coca_is_none if x[1]]
    _A = ', '.join(UpperCamelCase__ )
    # generate prompts with coca model if prompt is None
    if content_prompt is None:
        if len(UpperCamelCase__ ):
            raise ValueError(
                f'Content prompt is None and CoCa [{coca_is_none_str}] is None.'
                f'Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
        _A = self.get_image_description(UpperCamelCase__ )
    if style_prompt is None:
        if len(UpperCamelCase__ ):
            raise ValueError(
                f'Style prompt is None and CoCa [{coca_is_none_str}] is None.'
                f' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.' )
        _A = self.get_image_description(UpperCamelCase__ )
    # get prompt text embeddings for content and style
    _A = self.tokenizer(
        UpperCamelCase__, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=UpperCamelCase__, return_tensors='pt', )
    _A = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
    _A = self.tokenizer(
        UpperCamelCase__, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=UpperCamelCase__, return_tensors='pt', )
    _A = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
    # Spherical interpolation between content and style text embeddings.
    _A = slerp(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    # duplicate text embeddings for each generation per prompt
    _A = text_embeddings.repeat_interleave(UpperCamelCase__, dim=0 )
    # set timesteps
    _A = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
    _A = {}
    if accepts_offset:
        _A = 1
    self.scheduler.set_timesteps(UpperCamelCase__, **UpperCamelCase__ )
    # Some schedulers like PNDM have timesteps as arrays
    # It's more optimized to move all timesteps to correct device beforehand
    self.scheduler.timesteps.to(self.device )
    _A , _A = self.get_timesteps(UpperCamelCase__, UpperCamelCase__, self.device )
    _A = timesteps[:1].repeat(UpperCamelCase__ )
    # Preprocess image
    _A = preprocess(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    _A = self.prepare_latents(
        UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, text_embeddings.dtype, self.device, UpperCamelCase__ )
    _A = preprocess(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    _A = self.prepare_latents(
        UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, text_embeddings.dtype, self.device, UpperCamelCase__ )
    # Slerp the content and style latents into the starting latents.
    _A = slerp(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    if clip_guidance_scale > 0:
        _A = self.get_clip_image_embeddings(UpperCamelCase__, UpperCamelCase__ )
        _A = self.get_clip_image_embeddings(UpperCamelCase__, UpperCamelCase__ )
        _A = slerp(
            UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    _A = guidance_scale > 1.0
    # get unconditional embeddings for classifier free guidance
    if do_classifier_free_guidance:
        _A = content_text_input.input_ids.shape[-1]
        _A = self.tokenizer([''], padding='max_length', max_length=UpperCamelCase__, return_tensors='pt' )
        _A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
        # duplicate unconditional embeddings for each generation per prompt
        _A = uncond_embeddings.repeat_interleave(UpperCamelCase__, dim=0 )
        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        _A = torch.cat([uncond_embeddings, text_embeddings] )
    # get the initial random noise unless the user supplied it
    # Unlike in other pipelines, latents need to be generated in the target device
    # for 1-to-1 results reproducibility with the CompVis implementation.
    # However this currently doesn't work in `mps`.
    _A = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
    _A = text_embeddings.dtype
    if latents is None:
        if self.device.type == "mps":
            # randn does not work reproducibly on mps
            _A = torch.randn(UpperCamelCase__, generator=UpperCamelCase__, device='cpu', dtype=UpperCamelCase__ ).to(
                self.device )
        else:
            _A = torch.randn(UpperCamelCase__, generator=UpperCamelCase__, device=self.device, dtype=UpperCamelCase__ )
    else:
        if latents.shape != latents_shape:
            raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
        _A = latents.to(self.device )
    # scale the initial noise by the standard deviation required by the scheduler
    _A = latents * self.scheduler.init_noise_sigma
    # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
    # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
    # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
    # and should be between [0, 1]
    _A = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    _A = {}
    if accepts_eta:
        _A = eta
    # check if the scheduler accepts generator
    _A = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
    if accepts_generator:
        _A = generator
    with self.progress_bar(total=UpperCamelCase__ ):
        for i, t in enumerate(UpperCamelCase__ ):
            # expand the latents if we are doing classifier free guidance
            _A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            _A = self.scheduler.scale_model_input(UpperCamelCase__, UpperCamelCase__ )
            # predict the noise residual
            _A = self.unet(UpperCamelCase__, UpperCamelCase__, encoder_hidden_states=UpperCamelCase__ ).sample
            # perform classifier free guidance
            if do_classifier_free_guidance:
                _A , _A = noise_pred.chunk(2 )
                _A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # perform clip guidance
            if clip_guidance_scale > 0:
                _A = (
                    text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                )
                _A , _A = self.cond_fn(
                    UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, )
            # compute the previous noisy sample x_t -> x_t-1
            _A = self.scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__ ).prev_sample
    # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
    _A = 1 / 0.18_215 * latents
    _A = self.vae.decode(UpperCamelCase__ ).sample
    _A = (image / 2 + 0.5).clamp(0, 1 )
    # (batch, C, H, W) -> (batch, H, W, C) numpy for PIL conversion
    _A = image.cpu().permute(0, 2, 3, 1 ).numpy()
    if output_type == "pil":
        _A = self.numpy_to_pil(UpperCamelCase__ )
    if not return_dict:
        return (image, None)
    return StableDiffusionPipelineOutput(images=UpperCamelCase__, nsfw_content_detected=UpperCamelCase__ )
| 107 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Training via the CLI needs at least one deep-learning backend installed.
if not (is_tf_available() or is_torch_available()):
    raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")

# TF training parameters
# NOTE(review): two distinct flags were likely collapsed onto the single name
# `a_` by an automated rename — the first assignment is immediately overwritten.
a_ :List[str] = False
a_ :Any = False
def lowercase_ (A : Namespace ):
    """Factory used by the ``train`` sub-parser to build the command object.

    Args:
        A: parsed CLI arguments of the ``train`` sub-command.

    Returns:
        The training command instance (the ``snake_case__`` class below).
    """
    # Fix: this module defines the command class as `snake_case__`; the previous
    # `return TrainCommand(A)` referenced a name that exists nowhere in this
    # module and raised NameError as soon as the sub-command was invoked.
    return snake_case__(A )
class snake_case__ ( BaseTransformersCLICommand ):
    """CLI ``train`` command: fine-tunes a text-classification pipeline on a
    tab-separated CSV dataset and saves the resulting model.

    Fixes applied (the previous version was broken by an automated rename):
    - base class was the undefined name ``lowerCAmelCase_``; the intended base,
      ``BaseTransformersCLICommand``, is imported at the top of this module;
    - ``__init__`` bound every value to a throwaway local instead of ``self.*``
      and then immediately read ``self.logger`` (AttributeError);
    - four methods all shared the name ``lowercase_`` and overwrote each other,
      so the ``self.run_tf()`` / ``self.run_torch()`` dispatch could never work;
    - the sub-parser's ``func`` default pointed at the parser argument instead
      of the module-level factory ``lowercase_``.
    """

    @staticmethod
    def register_subcommand( _snake_case : ArgumentParser ) ->None:
        """Register the ``train`` sub-command and all of its arguments on *_snake_case*."""
        train_parser = _snake_case.add_parser('train', help='CLI tool to train a model on a task.' )
        train_parser.add_argument(
            '--train_data', type=str, required=True, help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.', )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.' )
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.' )
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.' )
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).' )
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.' )
        train_parser.add_argument(
            '--validation_split', type=float, default=0.1, help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.', )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.' )
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.' )
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.' )
        train_parser.add_argument('--train_batch_size', type=int, default=3_2, help='Batch size for training.' )
        train_parser.add_argument('--valid_batch_size', type=int, default=6_4, help='Batch size for validation.' )
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.' )
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.' )
        # `lowercase_` is this module's factory turning the parsed Namespace
        # into a command instance.
        train_parser.set_defaults(func=lowercase_ )

    def __init__( self : Optional[int], args : Namespace ) ->None:
        """Build the pipeline and load datasets from the parsed CLI arguments."""
        self.logger = logging.get_logger('transformers-cli/training' )
        # Prefer TF when it is installed, otherwise fall back to PyTorch.
        self.framework = 'tf' if is_tf_available() else 'torch'
        os.makedirs(args.output, exist_ok=True )
        self.output = args.output
        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id
        self.logger.info(F'''Loading {args.task} pipeline for {args.model}''' )
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(F'''Loading dataset from {args.train_data}''' )
        self.train_dataset = Processor.create_from_csv(
            args.train_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(F'''Loading validation dataset from {args.validation_data}''' )
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data, column_label=args.column_label, column_text=args.column_text, column_id=args.column_id, skip_first_row=args.skip_first_row, )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run( self : Any ) ->Dict:
        """Dispatch to the backend-specific training implementation."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch( self : Dict ) ->List[Any]:
        # PyTorch training is not implemented for this command.
        raise NotImplementedError

    def run_tf( self : str ) ->Optional[Any]:
        """Fit the TF pipeline on the loaded dataset and save the result."""
        self.pipeline.fit(
            self.train_dataset, validation_data=self.valid_dataset, validation_split=self.validation_split, learning_rate=self.learning_rate, adam_epsilon=self.adam_epsilon, train_batch_size=self.train_batch_size, valid_batch_size=self.valid_batch_size, )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
| 478 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for the I-BERT configuration module.
__lowerCamelCase = logging.get_logger(__name__)
# Map of canonical I-BERT checkpoints to their hosted config files.
# NOTE(review): both module-level names were collapsed to `__lowerCamelCase` by
# an automated rename, so this dict overwrites the logger assigned just above;
# originally these were distinct names (logger / archive map).
__lowerCamelCase = {
    '''kssteven/ibert-roberta-base''': '''https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large''': '''https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json''',
    '''kssteven/ibert-roberta-large-mnli''': (
        '''https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'''
    ),
}
class A__ ( PretrainedConfig ):
    """Configuration class for I-BERT (integer-only BERT).

    Defaults reproduce the ``kssteven/ibert-roberta-base`` architecture.

    Fixes applied: the previous version declared every parameter as
    ``UpperCamelCase__`` (duplicate argument names are a SyntaxError), bound
    every value to a throwaway local ``A_`` instead of an instance attribute
    (so the config stored no hyper-parameters), derived from the undefined
    name ``_snake_case`` instead of the imported ``PretrainedConfig``, and
    lost the ``model_type`` class attribute the auto classes rely on.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs,
    ) -> None:
        """Store the model hyper-parameters; extra kwargs go to the base config."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # I-BERT specific: whether to run in integer-quantized mode, and which
        # layers (if any) to force back to full precision.
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class A__ ( OnnxConfig ):
    """ONNX export configuration for I-BERT.

    NOTE(review): this class reuses the name ``A__`` and therefore shadows the
    configuration class defined just above it in this module — an automated
    rename collapsed two distinct class names.
    """

    @property
    def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported model's inputs."""
        # Fix: the axis dict was previously bound to the throwaway local `A_`
        # while the return statement referenced the undefined name
        # `dynamic_axis`, so accessing this property raised NameError.
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
        return OrderedDict(
            [
                ("""input_ids""", dynamic_axis),
                ("""attention_mask""", dynamic_axis),
            ] )
| 667 |
'''simple docstring'''

import warnings

# Deprecation shim: re-export the pipeline and warn callers to import it from
# diffusers directly.
# Fix: the import previously referenced `StableDiffusionImgaImgPipeline`, a
# name that does not exist in diffusers (ImportError on load); the warning
# text below already names the correct class, `StableDiffusionImg2ImgPipeline`.
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401

warnings.warn(
    '''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
    ''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 667 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy-import structure: maps submodule name -> public symbols it exports.
# Fix: this dict and the per-backend additions below were all previously
# assigned to the single name `lowerCAmelCase`, so each branch overwrote the
# previous one, and the `_LazyModule(...)` call at the bottom referenced the
# undefined name `_import_structure`.
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deit"] = [
        "DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DeiTForImageClassification",
        "DeiTForImageClassificationWithTeacher",
        "DeiTForMaskedImageModeling",
        "DeiTModel",
        "DeiTPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deit"] = [
        "TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDeiTForImageClassification",
        "TFDeiTForImageClassificationWithTeacher",
        "TFDeiTForMaskedImageModeling",
        "TFDeiTModel",
        "TFDeiTPreTrainedModel",
    ]

# Real imports for type checkers; lazy proxy at runtime.
if TYPE_CHECKING:
    from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_deit import DeiTFeatureExtractor
        from .image_processing_deit import DeiTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deit import (
            DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
            DeiTModel,
            DeiTPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deit import (
            TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
            TFDeiTModel,
            TFDeiTPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 671 |
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING

# Module-level logger used by the UperNet configuration class below.
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __lowercase ( PretrainedConfig ):
    """Configuration for the UperNet semantic-segmentation framework.

    Wraps a backbone configuration (ResNet by default) plus the decode/auxiliary
    head hyper-parameters.

    Fixes applied: the previous version declared every parameter as
    ``lowerCAmelCase__`` (duplicate argument names are a SyntaxError), bound
    every value to a throwaway local instead of ``self.*`` (so ``to_dict`` —
    itself misnamed — read attributes that were never set), derived from the
    undefined name ``UpperCAmelCase_`` instead of the imported
    ``PretrainedConfig``, and lost the ``model_type`` class attribute that
    ``to_dict`` serializes.
    """

    model_type = '''upernet'''

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=None,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        """Store the segmentation hyper-parameters and hydrate the backbone config."""
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            # Re-hydrate a plain dict (e.g. loaded from disk) into its config class.
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        # `None` sentinel avoids a shared mutable default; [1, 2, 3, 6] is the
        # documented default pooling pyramid.
        self.pool_scales = [1, 2, 3, 6] if pool_scales is None else pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 671 | 1 |
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class A_ ( unittest.TestCase ):
    """Helper that builds small BigBird configs/inputs for the Flax tests.

    NOTE(review): an automated rename collapsed every constructor parameter
    into ``SCREAMING_SNAKE_CASE__`` (duplicate argument names are a
    SyntaxError) and every local into ``__lowerCamelCase``; the free names
    used below (``parent``, ``batch_size`` …, ``input_ids``, ``config``) are
    the originals and are unbound here. The distinct names must be restored
    before this class can run.
    """

    def __init__( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str=2 ,SCREAMING_SNAKE_CASE__ : List[str]=5_6 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Dict=9_9 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3_2 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,SCREAMING_SNAKE_CASE__ : int=7 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="gelu_new" ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : str=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=1_6 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,SCREAMING_SNAKE_CASE__ : Tuple=0.02 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,SCREAMING_SNAKE_CASE__ : str="block_sparse" ,SCREAMING_SNAKE_CASE__ : Dict=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=False ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3 ,):
        # NOTE(review): every assignment below should bind `self.<name>` — the
        # methods further down read e.g. `self.batch_size`, `self.vocab_size`.
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : List[str] = batch_size
        __lowerCamelCase : Union[str, Any] = seq_length
        __lowerCamelCase : Tuple = is_training
        __lowerCamelCase : List[Any] = use_attention_mask
        __lowerCamelCase : List[Any] = use_token_type_ids
        __lowerCamelCase : Union[str, Any] = use_labels
        __lowerCamelCase : Tuple = vocab_size
        __lowerCamelCase : Optional[Any] = hidden_size
        __lowerCamelCase : Tuple = num_hidden_layers
        __lowerCamelCase : int = num_attention_heads
        __lowerCamelCase : List[str] = intermediate_size
        __lowerCamelCase : Any = hidden_act
        __lowerCamelCase : Union[str, Any] = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Tuple = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : Dict = type_sequence_label_size
        __lowerCamelCase : Dict = initializer_range
        __lowerCamelCase : Union[str, Any] = num_choices
        __lowerCamelCase : List[Any] = rescale_embeddings
        __lowerCamelCase : Dict = attention_type
        __lowerCamelCase : List[str] = use_bias
        __lowerCamelCase : List[str] = block_size
        __lowerCamelCase : Union[str, Any] = num_random_blocks

    def lowerCAmelCase ( self : int):
        # Build (config, input_ids, token_type_ids, attention_mask) fixtures.
        # NOTE(review): the locals below were also collapsed, so the names in
        # the return statement are unbound.
        __lowerCamelCase : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size)
        __lowerCamelCase : Union[str, Any] = None
        if self.use_attention_mask:
            __lowerCamelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCamelCase : List[Any] = None
        if self.use_token_type_ids:
            __lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size)
        __lowerCamelCase : int = BigBirdConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,block_size=self.block_size ,num_random_blocks=self.num_random_blocks ,use_bias=self.use_bias ,rescale_embeddings=self.rescale_embeddings ,)
        return config, input_ids, token_type_ids, attention_mask

    def lowerCAmelCase ( self : str):
        # Package the fixtures as the (config, inputs_dict) pair the common
        # test-mixin expects.
        __lowerCamelCase : int = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Flax BigBird model test-suite.

    NOTE(review): the first base class, ``SCREAMING_SNAKE_CASE``, is undefined
    (the mixin ``FlaxModelTesterMixin`` imported above is the apparent intent);
    the three class attributes below all share the name ``_UpperCAmelCase`` so
    the later assignments overwrite the model-class tuple; and every test
    method shares the name ``lowerCAmelCase``, so only the last definition
    survives on the class. These renames must be reverted before the suite
    can run.
    """

    _UpperCAmelCase : List[str] = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    _UpperCAmelCase : List[str] = False
    _UpperCAmelCase : Dict = False

    def lowerCAmelCase ( self : Tuple):
        # NOTE(review): should bind `self.model_tester` (read below) and
        # `FlaxBigBirdModelTester` is undefined — the tester class above was
        # renamed to `A_`.
        __lowerCamelCase : int = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase ( self : int):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase ( self : Dict):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase ( self : Any):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase ( self : List[str]):
        super().test_hidden_states_output()

    @slow
    def lowerCAmelCase ( self : Optional[int]):
        # Smoke-test loading the published checkpoint for every model class.
        for model_class_name in self.all_model_classes:
            __lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/bigbird-roberta-base')
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : str):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def lowerCAmelCase ( self : str):
        # JIT-compiled and eager forward passes must agree in output shapes.
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                __lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
                __lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE__)

                @jax.jit
                def model_jitted(SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,**SCREAMING_SNAKE_CASE__ : int):
                    return model(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)

                with self.subTest('JIT Enabled'):
                    __lowerCamelCase : Dict = model_jitted(**SCREAMING_SNAKE_CASE__).to_tuple()
                with self.subTest('JIT Disabled'):
                    with jax.disable_jit():
                        __lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE__).to_tuple()
                self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,len(SCREAMING_SNAKE_CASE__))
                for jitted_output, output in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
                    self.assertEqual(jitted_output.shape ,output.shape)

    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int=1E-5 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="outputs" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith('outputs.attentions'):
            return
        else:
            super().check_pt_flax_outputs(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
| 337 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : int = (PNDMScheduler,)
_UpperCAmelCase : str = (('''num_inference_steps''', 50),)
def lowerCAmelCase ( self : Optional[int] ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
    """Return the default PNDM scheduler config, with keyword overrides applied.

    Returns:
        dict: scheduler constructor kwargs (timesteps and beta schedule).
    """
    # Fix: the dict was previously bound to a throwaway mangled local while
    # `config.update(...)` below referenced the undefined name `config`,
    # raising NameError on every call.
    config = {
        'num_train_timesteps': 1_0_0_0,
        'beta_start': 0.0001,
        'beta_end': 0.02,
        'beta_schedule': 'linear',
    }
    config.update(**SCREAMING_SNAKE_CASE__)
    return config
def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Tuple=0 ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
    # Save/reload round-trip check: a scheduler restored via `from_pretrained`
    # must produce the same prk/plms step outputs as the original.
    # NOTE(review): the positional and ** parameters share one mangled name
    # (duplicate argument names are a SyntaxError) and the locals were
    # collapsed to `__lowerCamelCase`, leaving `kwargs`, `sample`, `residual`,
    # `scheduler`, `new_scheduler`, `dummy_past_residuals`, `output` and
    # `new_output` unbound. Restore the distinct names before use.
    __lowerCamelCase : str = dict(self.forward_default_kwargs)
    __lowerCamelCase : Union[str, Any] = kwargs.pop('num_inference_steps' ,SCREAMING_SNAKE_CASE__)
    __lowerCamelCase : int = self.dummy_sample
    __lowerCamelCase : Union[str, Any] = 0.1 * sample
    __lowerCamelCase : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
    for scheduler_class in self.scheduler_classes:
        __lowerCamelCase : int = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = scheduler_class(**SCREAMING_SNAKE_CASE__)
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE__)
        # copy over dummy past residuals
        __lowerCamelCase : Dict = dummy_past_residuals[:]
        with tempfile.TemporaryDirectory() as tmpdirname:
            scheduler.save_config(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : Tuple = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__)
            new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__)
            # copy over dummy past residuals
            __lowerCamelCase : List[str] = dummy_past_residuals[:]
        __lowerCamelCase : Tuple = scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        __lowerCamelCase : Optional[int] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
        __lowerCamelCase : Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        __lowerCamelCase : Optional[int] = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : Dict):
    # Deliberate no-op: presumably overrides a common-test hook that does not
    # apply to PNDM — TODO confirm against the common scheduler tests.
    # NOTE(review): several methods in this class share the name
    # `lowerCAmelCase` after an automated rename; later definitions overwrite
    # earlier ones on the class.
    pass
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple=0 ,**SCREAMING_SNAKE_CASE__ : Dict):
    # Variant of the save/reload round-trip check where the past residuals are
    # copied over after `set_timesteps`.
    # NOTE(review): same mangling as the method above — duplicate parameter
    # names and collapsed locals leave `kwargs`, `sample`, `residual`,
    # `scheduler`, `new_scheduler`, `dummy_past_residuals`, `output` and
    # `new_output` unbound. Restore the distinct names before use.
    __lowerCamelCase : int = dict(self.forward_default_kwargs)
    __lowerCamelCase : Union[str, Any] = kwargs.pop('num_inference_steps' ,SCREAMING_SNAKE_CASE__)
    __lowerCamelCase : str = self.dummy_sample
    __lowerCamelCase : Any = 0.1 * sample
    __lowerCamelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
    for scheduler_class in self.scheduler_classes:
        __lowerCamelCase : int = self.get_scheduler_config()
        __lowerCamelCase : str = scheduler_class(**SCREAMING_SNAKE_CASE__)
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE__)
        # copy over dummy past residuals (must be after setting timesteps)
        __lowerCamelCase : List[str] = dummy_past_residuals[:]
        with tempfile.TemporaryDirectory() as tmpdirname:
            scheduler.save_config(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : int = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__)
            # copy over dummy past residuals
            new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__)
            # copy over dummy past residual (must be after setting timesteps)
            __lowerCamelCase : List[str] = dummy_past_residuals[:]
        __lowerCamelCase : Optional[Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        __lowerCamelCase : List[Any] = new_scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
        __lowerCamelCase : str = scheduler.step_plms(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        __lowerCamelCase : Any = new_scheduler.step_plms(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : Dict):
    # Run a complete denoising loop (PRK warm-up steps, then PLMS steps) and
    # return the final sample tensor.
    # NOTE(review): locals mangled to `__lowerCamelCase` while later lines read
    # the intended names (`scheduler`, `model`, `sample`) — left byte-identical.
    __lowerCamelCase : Tuple = self.scheduler_classes[0]
    __lowerCamelCase : List[str] = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__)
    __lowerCamelCase : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__)
    __lowerCamelCase : Any = 1_0
    __lowerCamelCase : int = self.dummy_model()
    __lowerCamelCase : int = self.dummy_sample_deter
    scheduler.set_timesteps(SCREAMING_SNAKE_CASE__)
    # Runge-Kutta warm-up phase.
    for i, t in enumerate(scheduler.prk_timesteps):
        __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).prev_sample
    # Linear multistep phase.
    for i, t in enumerate(scheduler.plms_timesteps):
        __lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = scheduler.step_plms(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).prev_sample
    return sample

def lowerCAmelCase ( self : int):
    # step_prk/step_plms must preserve the sample shape, and consecutive calls
    # must produce same-shaped outputs.
    __lowerCamelCase : Tuple = dict(self.forward_default_kwargs)
    __lowerCamelCase : List[str] = kwargs.pop('num_inference_steps' ,SCREAMING_SNAKE_CASE__)
    for scheduler_class in self.scheduler_classes:
        __lowerCamelCase : Optional[int] = self.get_scheduler_config()
        __lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = self.dummy_sample
        __lowerCamelCase : Tuple = 0.1 * sample
        if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ ,'set_timesteps'):
            scheduler.set_timesteps(SCREAMING_SNAKE_CASE__)
        elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ ,'set_timesteps'):
            __lowerCamelCase : List[Any] = num_inference_steps
        # copy over dummy past residuals (must be done after set_timesteps)
        __lowerCamelCase : Union[str, Any] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        __lowerCamelCase : Optional[int] = dummy_past_residuals[:]
        __lowerCamelCase : Optional[int] = scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,0 ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        __lowerCamelCase : Union[str, Any] = scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,1 ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        self.assertEqual(output_a.shape ,sample.shape)
        self.assertEqual(output_a.shape ,output_a.shape)
        __lowerCamelCase : Optional[Any] = scheduler.step_plms(SCREAMING_SNAKE_CASE__ ,0 ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        __lowerCamelCase : Any = scheduler.step_plms(SCREAMING_SNAKE_CASE__ ,1 ,SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__).prev_sample
        self.assertEqual(output_a.shape ,sample.shape)
        self.assertEqual(output_a.shape ,output_a.shape)
def lowerCAmelCase ( self : Any):
    # Sweep num_train_timesteps through the shared config checker.
    # NOTE(review): the loop binds `timesteps` but the call passes the mangled
    # name `SCREAMING_SNAKE_CASE__` — same rename breakage as the rest of the
    # file; flagged only, code left byte-identical (also applies below).
    for timesteps in [1_0_0, 1_0_0_0]:
        self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__)

def lowerCAmelCase ( self : Tuple):
    # Sweep steps_offset, then pin the exact timestep schedule for offset=1.
    for steps_offset in [0, 1]:
        self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE__)
    __lowerCamelCase : Tuple = self.scheduler_classes[0]
    __lowerCamelCase : Any = self.get_scheduler_config(steps_offset=1)
    __lowerCamelCase : List[Any] = scheduler_class(**SCREAMING_SNAKE_CASE__)
    scheduler.set_timesteps(1_0)
    assert torch.equal(
        scheduler.timesteps ,torch.LongTensor(
            [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1]) ,)

def lowerCAmelCase ( self : Optional[int]):
    # Sweep (beta_start, beta_end) pairs.
    for beta_start, beta_end in zip([0.0001, 0.001] ,[0.002, 0.02]):
        self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE__ ,beta_end=SCREAMING_SNAKE_CASE__)

def lowerCAmelCase ( self : Tuple):
    # Sweep the supported beta schedules.
    for schedule in ["linear", "squaredcos_cap_v2"]:
        self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE__)

def lowerCAmelCase ( self : List[str]):
    # Sweep the supported prediction types.
    for prediction_type in ["epsilon", "v_prediction"]:
        self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__)

def lowerCAmelCase ( self : List[Any]):
    # Sweep individual time steps through the forward checker.
    for t in [1, 5, 1_0]:
        self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__)

def lowerCAmelCase ( self : List[str]):
    # Sweep (time step, num_inference_steps) pairs.
    for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0]):
        self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__)

def lowerCAmelCase ( self : Optional[Any]):
    # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
    __lowerCamelCase : str = 2_7
    for scheduler_class in self.scheduler_classes:
        __lowerCamelCase : Optional[Any] = self.dummy_sample
        __lowerCamelCase : Dict = 0.1 * sample
        __lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
        __lowerCamelCase : str = scheduler_class(**SCREAMING_SNAKE_CASE__)
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE__)
        # before power of 3 fix, would error on first step, so we only need to do two
        for i, t in enumerate(scheduler.prk_timesteps[:2]):
            __lowerCamelCase : Tuple = scheduler.step_prk(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).prev_sample

def lowerCAmelCase ( self : Optional[Any]):
    # Calling step_plms() before set_timesteps() must raise.
    with self.assertRaises(SCREAMING_SNAKE_CASE__):
        __lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
        __lowerCamelCase : str = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**SCREAMING_SNAKE_CASE__)
        scheduler.step_plms(self.dummy_sample ,1 ,self.dummy_sample).prev_sample
def lowerCAmelCase ( self : Optional[int]):
    # Regression values for the default full loop.
    # NOTE(review): `result_sum`/`result_mean` are read below but the locals
    # were mangled to `__lowerCamelCase` — flagged, left byte-identical
    # (applies to all four methods here).
    __lowerCamelCase : List[Any] = self.full_loop()
    __lowerCamelCase : Any = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
    __lowerCamelCase : Optional[Any] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
    assert abs(result_sum.item() - 198.1318) < 1E-2
    assert abs(result_mean.item() - 0.2580) < 1E-3

def lowerCAmelCase ( self : str):
    # Regression values with v-prediction parameterisation.
    __lowerCamelCase : Dict = self.full_loop(prediction_type='v_prediction')
    __lowerCamelCase : str = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
    __lowerCamelCase : List[str] = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
    assert abs(result_sum.item() - 67.3986) < 1E-2
    assert abs(result_mean.item() - 0.0878) < 1E-3

def lowerCAmelCase ( self : Optional[Any]):
    # We specify different beta, so that the first alpha is 0.99
    __lowerCamelCase : Optional[Any] = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,beta_start=0.01)
    __lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
    __lowerCamelCase : str = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
    assert abs(result_sum.item() - 230.0399) < 1E-2
    assert abs(result_mean.item() - 0.2995) < 1E-3

def lowerCAmelCase ( self : Optional[int]):
    # We specify different beta, so that the first alpha is 0.99
    __lowerCamelCase : Dict = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,beta_start=0.01)
    __lowerCamelCase : Optional[int] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
    __lowerCamelCase : Any = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
    assert abs(result_sum.item() - 186.9482) < 1E-2
    assert abs(result_mean.item() - 0.2434) < 1E-3
| 337 | 1 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __magic_name__ ( SCREAMING_SNAKE_CASE ) -> list[list[float]]:
_lowercase : List[str] = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(SCREAMING_SNAKE_CASE ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
_lowercase : List[Any] = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
_lowercase : Optional[Any] = [[0.0, 0.0], [0.0, 0.0]]
_lowercase , _lowercase : str = matrix[1][1], matrix[0][0]
_lowercase , _lowercase : Optional[int] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(SCREAMING_SNAKE_CASE ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(SCREAMING_SNAKE_CASE ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
_lowercase : str = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
_lowercase : str = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
_lowercase : Dict = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
_lowercase : List[str] = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
_lowercase : Dict = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
_lowercase : Dict = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
_lowercase : Tuple = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
_lowercase : Optional[Any] = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
_lowercase : Any = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
_lowercase : Dict = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
_lowercase : List[str] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
_lowercase : Tuple = array(SCREAMING_SNAKE_CASE )
for i in range(3 ):
for j in range(3 ):
_lowercase : str = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
_lowercase : List[Any] = array(SCREAMING_SNAKE_CASE )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(SCREAMING_SNAKE_CASE )
# Calculate the inverse of the matrix
return [[float(d(SCREAMING_SNAKE_CASE ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 66 | import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    # Command-line entry point: convert an original ControlNet checkpoint into
    # the diffusers format and save it to --dump_path.
    # Fixes versus the previous revision: the parser / parsed-args / result
    # objects were all bound to the same mangled name while later lines read
    # `parser`, `args` and `controlnet`; the bool converter read an undefined
    # `string`; and `type=parse_bool` referenced a name that does not exist.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
    )
    parser.add_argument(
        '''--original_config_file''',
        type=str,
        required=True,
        help='''The YAML config file corresponding to the original architecture.''',
    )
    parser.add_argument(
        '''--num_in_channels''',
        default=None,
        type=int,
        help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
    )
    parser.add_argument(
        '''--image_size''',
        default=512,
        type=int,
        help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'''
            ''' Base. Use 768 for Stable Diffusion v2.'''
        ),
    )
    parser.add_argument(
        '''--extract_ema''',
        action='''store_true''',
        help=(
            '''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
            ''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
            ''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
        ),
    )
    parser.add_argument(
        '''--upcast_attention''',
        action='''store_true''',
        help=(
            '''Whether the attention computation should always be upcasted. This is necessary when running stable'''
            ''' diffusion 2.1.'''
        ),
    )
    parser.add_argument(
        '''--from_safetensors''',
        action='''store_true''',
        help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
    )
    parser.add_argument(
        '''--to_safetensors''',
        action='''store_true''',
        help='''Whether to store pipeline in safetensors format or not.''',
    )
    parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')

    def __UpperCamelCase ( A ):
        """Argparse converter: map the strings 'True'/'False' to booleans."""
        if A == "True":
            return True
        elif A == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {A}" )

    parser.add_argument(
        '''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=__UpperCamelCase
    )
    parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 415 | 0 |
'''simple docstring'''
__UpperCAmelCase = '''Alexander Joslin'''
import operator as op
from .stack import Stack
def _snake_case ( A ) -> int:
    """Evaluate a fully parenthesised infix expression with Dijkstra's
    two-stack algorithm and return the result.

    NOTE(review): only single-digit operands are supported (each character is
    tested with isdigit()). Local assignment targets were mangled to
    `lowerCAmelCase__` while later lines read the intended names (`operators`,
    `operand_stack`, `operator_stack`, `equation`, `opr`) — flagged only,
    code left byte-identical.
    """
    lowerCAmelCase__ = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    lowerCAmelCase__ = Stack()
    lowerCAmelCase__ = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(A ) )
        elif i in operators:
            # RULE 2
            operator_stack.push(A )
        elif i == ")":
            # RULE 4
            lowerCAmelCase__ = operator_stack.peek()
            operator_stack.pop()
            lowerCAmelCase__ = operand_stack.peek()
            operand_stack.pop()
            lowerCAmelCase__ = operand_stack.peek()
            operand_stack.pop()
            # NOTE(review): argument order matters for '-' and '/'; the
            # mangled call below hides which operand comes first — confirm
            # against the upstream two-stack implementation before fixing.
            lowerCAmelCase__ = operators[opr](A , A )
            operand_stack.push(A )
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo: evaluate a fixed expression with the evaluator defined above.
    # Fix: the previous revision referenced undefined names `equation` and
    # `dijkstras_two_stack_algorithm`.
    __UpperCAmelCase = '''(5 + ((4 * 2) * (2 + 3)))'''
    # answer = 45
    print(f"""{__UpperCAmelCase} = {_snake_case(__UpperCAmelCase)}""")
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
# Module-level logger and the config docstring placeholder.
# NOTE(review): both values are bound to the same mangled name, so the second
# assignment overwrites the logger — flagged, left byte-identical.
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = '''T5Config'''
def _snake_case ( A , A , A ) -> jnp.ndarray:
lowerCAmelCase__ = jnp.zeros_like(A )
lowerCAmelCase__ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
lowerCAmelCase__ = shifted_input_ids.at[:, 0].set(A )
lowerCAmelCase__ = jnp.where(shifted_input_ids == -100 , A , A )
return shifted_input_ids
class a__ ( a__ ):
    # NOTE(review): the base-class name was mangled to the class's own name,
    # which raises NameError at definition time. The imports above suggest the
    # intended bases are the Flax T5 classes (FlaxTaModel /
    # FlaxTaForConditionalGeneration / FlaxTaEncoderModel), but which class
    # maps to which cannot be determined from this file — confirm before
    # fixing. Same remark applies to the two classes below.
    '''simple docstring'''
    # MT5 reuses the T5 implementation; only model_type and config differ.
    lowercase__ : int = "mt5"
    lowercase__ : Dict = MTaConfig
class a__ ( a__ ):
    '''simple docstring'''
    lowercase__ : int = "mt5"
    lowercase__ : Any = MTaConfig
class a__ ( a__ ):
    '''simple docstring'''
    lowercase__ : Union[str, Any] = "mt5"
    lowercase__ : Tuple = MTaConfig
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the checkpoint-name -> config-URL map.
# NOTE(review): both are bound to the same mangled name, so the map overwrites
# the logger — flagged, left byte-identical.
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
    '''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class __magic_name__ ( _UpperCamelCase ):
    """Configuration class for the Transformer-XL architecture.

    NOTE(review): this block carries several mechanical-rename breakages,
    flagged but left byte-identical: (1) `__init__` declares every parameter
    with the same name `_UpperCAmelCase` (a SyntaxError) and also uses a
    mutable list default; (2) the body assigns to `_a` while reading names
    (`vocab_size`, `d_model`, ...) that were never bound; (3) the setter
    decorator below references `max_position_embeddings`, which is not the
    name of the preceding property method.
    """
    # Model identifier and attribute-name aliases used by the base config.
    lowerCAmelCase : Dict = 'transfo-xl'
    lowerCAmelCase : int = ['mems']
    lowerCAmelCase : Optional[int] = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self : int ,_UpperCAmelCase : Union[str, Any]=267735 ,_UpperCAmelCase : Optional[Any]=[20000, 40000, 200000] ,_UpperCAmelCase : Any=1024 ,_UpperCAmelCase : List[Any]=1024 ,_UpperCAmelCase : str=16 ,_UpperCAmelCase : Union[str, Any]=64 ,_UpperCAmelCase : Optional[int]=4096 ,_UpperCAmelCase : Optional[Any]=4 ,_UpperCAmelCase : Optional[int]=False ,_UpperCAmelCase : Union[str, Any]=18 ,_UpperCAmelCase : List[str]=1600 ,_UpperCAmelCase : List[Any]=1000 ,_UpperCAmelCase : Dict=True ,_UpperCAmelCase : Union[str, Any]=True ,_UpperCAmelCase : Any=0 ,_UpperCAmelCase : Union[str, Any]=-1 ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Dict=True ,_UpperCAmelCase : Optional[Any]="normal" ,_UpperCAmelCase : Dict=0.01 ,_UpperCAmelCase : Union[str, Any]=0.01 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Tuple=1E-5 ,_UpperCAmelCase : Optional[Any]=0 ,**_UpperCAmelCase : List[Any] ,):
        # Store vocabulary size and the adaptive-softmax cutoffs.
        _a : str = vocab_size
        _a : Tuple = []
        self.cutoffs.extend(_UpperCAmelCase )
        # Per-cluster projection-sharing flags for the adaptive softmax.
        if proj_share_all_but_first:
            _a : Optional[Any] = [False] + [True] * len(self.cutoffs )
        else:
            _a : Optional[Any] = [False] + [False] * len(self.cutoffs )
        # Model dimensions and layer counts.
        _a : Union[str, Any] = d_model
        _a : Dict = d_embed
        _a : str = d_head
        _a : Dict = d_inner
        _a : Dict = div_val
        _a : Optional[Any] = pre_lnorm
        _a : Any = n_layer
        _a : Optional[int] = n_head
        # Memory / attention behaviour.
        _a : Any = mem_len
        _a : Tuple = same_length
        _a : int = attn_type
        _a : List[str] = clamp_len
        _a : Any = sample_softmax
        _a : List[str] = adaptive
        # Regularisation and initialisation hyper-parameters.
        _a : Any = dropout
        _a : List[str] = dropatt
        _a : str = untie_r
        _a : Optional[int] = init
        _a : List[Any] = init_range
        _a : Tuple = proj_init_std
        _a : Any = init_std
        _a : Union[str, Any] = layer_norm_epsilon
        super().__init__(eos_token_id=_UpperCAmelCase ,**_UpperCAmelCase )
    @property
    def __lowercase ( self : str ):
        # Message copied from Transformer-XL documentation
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
        return -1
    @max_position_embeddings.setter
    def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 358 |
'''simple docstring'''
def __lowerCamelCase ( lowerCAmelCase_ ) -> bool:
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
_a : Tuple = sorted(string.lower() )
return len(lowerCAmelCase_ ) == len(set(lowerCAmelCase_ ) )
if __name__ == "__main__":
    # Interactive demo for the isogram checker defined above.
    # Fix: the previous revision bound both the input and the result to the
    # same mangled name while reading undefined names (`input_str`,
    # `is_isogram`, `isogram`).
    input_str = input('''Enter a string ''').strip()
    isogram = __lowerCamelCase(input_str)
    print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 358 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Tokenizer resource tables and segment-id constants.
# NOTE(review): every constant here is bound to the same mangled name
# `__magic_name__`, so each assignment overwrites the previous one, and the
# class below reads the original names (VOCAB_FILES_NAMES, ...) that are never
# defined — flagged, left byte-identical.
__magic_name__ : Dict = logging.get_logger(__name__)
__magic_name__ : Any = {"""vocab_file""": """spiece.model"""}
__magic_name__ : List[Any] = {
    """vocab_file""": {
        """xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
        """xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
    }
}
__magic_name__ : Any = {
    """xlnet-base-cased""": None,
    """xlnet-large-cased""": None,
}
# Segments (not really needed)
__magic_name__ : Optional[Any] = 0
__magic_name__ : str = 1
__magic_name__ : str = 2
__magic_name__ : Optional[int] = 3
__magic_name__ : Any = 4
class __SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
    """SentencePiece-based XLNet tokenizer (padding side: left).

    NOTE(review): this class carries several mechanical-rename breakages,
    flagged but left byte-identical: (1) the base-class name was mangled to
    the class's own name (NameError at import; the import of
    PreTrainedTokenizer above suggests the intended base — confirm);
    (2) several signatures declare multiple parameters with the same name
    `lowerCamelCase` (a SyntaxError); (3) method bodies read `_a` for
    parameters and assign results to `_snake_case` without ever reading them
    back under the intended names.
    """
    # Resource tables; the referenced module constants were renamed above.
    UpperCAmelCase__ : Union[str, Any] = VOCAB_FILES_NAMES
    UpperCAmelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase__ : Union[str, Any] = 'left'
    def __init__( self , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="<unk>" , lowerCamelCase="<sep>" , lowerCamelCase="<pad>" , lowerCamelCase="<cls>" , lowerCamelCase="<mask>" , lowerCamelCase=["<eop>", "<eod>"] , lowerCamelCase = None , **lowerCamelCase , ):
        # Mask token behave like a normal word, i.e. include the space before it
        _snake_case = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
        _snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , additional_special_tokens=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
        # Number of extra ids reserved at the end of the vocabulary.
        _snake_case = 3
        _snake_case = do_lower_case
        _snake_case = remove_space
        _snake_case = keep_accents
        _snake_case = vocab_file
        # Load the SentencePiece model from the vocab file.
        _snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_a )
    @property
    def UpperCamelCase( self ):
        # Vocabulary size is the SentencePiece model's piece count.
        return len(self.sp_model )
    def UpperCamelCase( self ):
        # Map token string -> id for the full vocabulary plus added tokens.
        _snake_case = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        # The SentencePiece processor is not picklable; drop it from the state.
        _snake_case = self.__dict__.copy()
        _snake_case = None
        return state
    def __setstate__( self , lowerCamelCase ):
        _snake_case = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            _snake_case = {}
        # Rebuild the SentencePiece processor after unpickling.
        _snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def UpperCamelCase( self ):
        # Normalise raw text (whitespace, quotes, accents, case) before
        # SentencePiece encoding.
        if self.remove_space:
            _snake_case = " ".join(inputs.strip().split() )
        else:
            _snake_case = inputs
        _snake_case = outputs.replace("``" , "\"" ).replace("\'\'" , "\"" )
        if not self.keep_accents:
            _snake_case = unicodedata.normalize("NFKD" , _a )
            _snake_case = "".join([c for c in outputs if not unicodedata.combining(_a )] )
        if self.do_lower_case:
            _snake_case = outputs.lower()
        return outputs
    def UpperCamelCase( self , lowerCamelCase ):
        # Tokenize text into SentencePiece pieces, splitting trailing digits
        # followed by a comma the same way the original XLNet tokenizer does.
        _snake_case = self.preprocess_text(_a )
        _snake_case = self.sp_model.encode(_a , out_type=_a )
        _snake_case = []
        for piece in pieces:
            if len(_a ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
                _snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , "" ) )
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0] ) == 1:
                        _snake_case = cur_pieces[1:]
                    else:
                        _snake_case = cur_pieces[0][1:]
                cur_pieces.append(piece[-1] )
                new_pieces.extend(_a )
            else:
                new_pieces.append(_a )
        return new_pieces
    def UpperCamelCase( self , lowerCamelCase ):
        # Token string -> id.
        return self.sp_model.PieceToId(_a )
    def UpperCamelCase( self , lowerCamelCase ):
        # Id -> token string.
        return self.sp_model.IdToPiece(_a )
    def UpperCamelCase( self , lowerCamelCase ):
        # Join pieces back into text, replacing the SentencePiece underline.
        _snake_case = "".join(_a ).replace(_a , " " ).strip()
        return out_string
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = True , **lowerCamelCase , ):
        # Decode a list of ids to a string, keeping added tokens separate from
        # SentencePiece pieces so both round-trip correctly.
        _snake_case = kwargs.pop("use_source_tokenizer" , _a )
        _snake_case = self.convert_ids_to_tokens(_a , skip_special_tokens=_a )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        _snake_case = []
        _snake_case = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(_a ) )
                    _snake_case = []
                sub_texts.append(_a )
            else:
                current_sub_text.append(_a )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(_a ) )
        # Mimic the behavior of the Rust tokenizer:
        # By default, there are no spaces between special tokens
        _snake_case = "".join(_a )
        _snake_case = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            _snake_case = self.clean_up_tokenization(_a )
            return clean_text
        else:
            return text
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None ):
        # XLNet layout: A [SEP] [CLS] or A [SEP] B [SEP] [CLS].
        _snake_case = [self.sep_token_id]
        _snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = False ):
        # 1 marks special tokens, 0 marks sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
        if token_ids_a is not None:
            return ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1, 1]
        return ([0] * len(_a )) + [1, 1]
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None ):
        # Segment ids: 0 for sequence A, 1 for sequence B, 2 for the CLS token.
        _snake_case = [self.sep_token_id]
        _snake_case = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase = None ):
        # Save the SentencePiece model to `save_directory`, copying the file if
        # it exists on disk or serialising the in-memory model otherwise.
        if not os.path.isdir(_a ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        _snake_case = os.path.join(
            _a , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _a )
        elif not os.path.isfile(self.vocab_file ):
            with open(_a , "wb" ) as fi:
                _snake_case = self.sp_model.serialized_model_proto()
                fi.write(_a )
        return (out_vocab_file,)
| 721 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Helper that builds DETA image-processor configs and computes the
    resize shapes the processor is expected to produce.

    NOTE(review): mechanical renames broke this class but it is left
    byte-identical: `__init__` declares many parameters all named
    `lowerCamelCase` (a SyntaxError) and assigns every value to the throwaway
    local `_snake_case` while later code reads the intended attribute names
    (`self.size`, `self.do_pad`, ...).
    """
    def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=400 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , lowerCamelCase=1 / 255 , lowerCamelCase=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        _snake_case = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = num_channels
        _snake_case = min_resolution
        _snake_case = max_resolution
        _snake_case = do_resize
        _snake_case = size
        _snake_case = do_normalize
        _snake_case = image_mean
        _snake_case = image_std
        _snake_case = do_rescale
        _snake_case = rescale_factor
        _snake_case = do_pad
    def UpperCamelCase( self ):
        # Config dict consumed by the image-processor tests below.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def UpperCamelCase( self , lowerCamelCase , lowerCamelCase=False ):
        # Compute the (height, width) the processor should resize to; for a
        # batch, take the per-image maxima (padding target).
        if not batched:
            _snake_case = image_inputs[0]
            if isinstance(lowerCamelCase , Image.Image ):
                _snake_case , _snake_case = image.size
            else:
                _snake_case , _snake_case = image.shape[1], image.shape[2]
            # Scale the shorter side to size["shortest_edge"], keep aspect ratio.
            if w < h:
                _snake_case = int(self.size["shortest_edge"] * h / w )
                _snake_case = self.size["shortest_edge"]
            elif w > h:
                _snake_case = self.size["shortest_edge"]
                _snake_case = int(self.size["shortest_edge"] * w / h )
            else:
                _snake_case = self.size["shortest_edge"]
                _snake_case = self.size["shortest_edge"]
        else:
            _snake_case = []
            for image in image_inputs:
                _snake_case , _snake_case = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            _snake_case = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
            _snake_case = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = DetaImageProcessor if is_vision_available() else None
def UpperCamelCase( self ):
    # setUp-style hook: build the image-processing tester.
    # NOTE(review): the result is bound to a throwaway local instead of
    # `self.image_processor_tester` (which later methods read), and
    # `DetaImageProcessingTester` is not defined in this file (the tester
    # class above was renamed) — flagged, left byte-identical. All methods in
    # this class also share the same mangled name and so override each other.
    _snake_case = DetaImageProcessingTester(self )
@property
def UpperCamelCase( self ):
    # Expose the tester's config dict to the tests below.
    return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase( self ):
    # The processor must expose every configuration attribute.
    _snake_case = self.image_processing_class(**self.image_processor_dict )
    self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
    self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
    self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
    self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
    self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
    self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
    self.assertTrue(hasattr(lowerCamelCase , "size" ) )
def UpperCamelCase( self ):
    # from_dict must round-trip the size/do_pad configuration.
    _snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
    self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1_333} )
    self.assertEqual(image_processor.do_pad , lowerCamelCase )
def UpperCamelCase( self ):
    # Intentionally empty hook.
    pass
def UpperCamelCase( self ):
    # Initialize image_processing
    # PIL inputs: check output pixel-value shapes for both single image and
    # batch. NOTE(review): locals mangled to `_snake_case` here and below;
    # flagged, left byte-identical.
    _snake_case = self.image_processing_class(**self.image_processor_dict )
    # create random PIL images
    _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
    for image in image_inputs:
        self.assertIsInstance(lowerCamelCase , Image.Image )
    # Test not batched input
    _snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase )
    self.assertEqual(
        encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
    # Test batched
    _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
    _snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ) , )
def UpperCamelCase( self ):
    # Initialize image_processing
    # Same shape checks with numpy-array inputs.
    _snake_case = self.image_processing_class(**self.image_processor_dict )
    # create random numpy tensors
    _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
    for image in image_inputs:
        self.assertIsInstance(lowerCamelCase , np.ndarray )
    # Test not batched input
    _snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase )
    self.assertEqual(
        encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
    # Test batched
    _snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
    _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ) , )
def UpperCamelCase( self ):
    # Initialize image_processing
    # Same shape checks with torch-tensor inputs.
    _snake_case = self.image_processing_class(**self.image_processor_dict )
    # create random PyTorch tensors
    _snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
    for image in image_inputs:
        self.assertIsInstance(lowerCamelCase , torch.Tensor )
    # Test not batched input
    _snake_case = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
    _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase )
    self.assertEqual(
        encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
    # Test batched
    _snake_case = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
    _snake_case , _snake_case = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
    self.assertEqual(
        encoded_images.shape , (
            self.image_processor_tester.batch_size,
            self.image_processor_tester.num_channels,
            expected_height,
            expected_width,
        ) , )
@slow
def UpperCamelCase( self ):
    """Integration test: encode a real COCO image plus detection annotations
    with ``DetaImageProcessor`` and compare every encoded field to reference
    values.

    NOTE(review): results are rebound to ``_snake_case`` and the undefined
    name ``lowerCamelCase`` is passed around; code left byte-identical.
    """
    # prepare image and target
    _snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
        _snake_case = json.loads(f.read() )
    _snake_case = {"image_id": 39_769, "annotations": target}
    # encode them
    _snake_case = DetaImageProcessor()
    _snake_case = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
    # verify pixel values
    _snake_case = torch.Size([1, 3, 800, 1_066] )
    self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
    _snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
    self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
    # verify area
    _snake_case = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
    # verify boxes
    _snake_case = torch.Size([6, 4] )
    self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
    _snake_case = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
    # verify image_id
    _snake_case = torch.tensor([39_769] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
    # verify is_crowd
    _snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
    # verify class_labels
    _snake_case = torch.tensor([75, 75, 63, 65, 17, 17] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
    # verify orig_size
    _snake_case = torch.tensor([480, 640] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
    # verify size
    _snake_case = torch.tensor([800, 1_066] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def UpperCamelCase( self ):
    """Integration test: encode a real COCO image with panoptic annotations
    (``format="coco_panoptic"``, including segmentation masks) and compare
    every encoded field to reference values.

    NOTE(review): results are rebound to ``_snake_case`` and the undefined
    name ``lowerCamelCase`` is passed around; code left byte-identical.
    """
    # prepare image, target and masks_path
    _snake_case = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
        _snake_case = json.loads(f.read() )
    _snake_case = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}
    _snake_case = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
    # encode them
    _snake_case = DetaImageProcessor(format="coco_panoptic" )
    _snake_case = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
    # verify pixel values
    _snake_case = torch.Size([1, 3, 800, 1_066] )
    self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
    _snake_case = torch.tensor([0.2796, 0.3138, 0.3481] )
    self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
    # verify area
    _snake_case = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
    # verify boxes
    _snake_case = torch.Size([6, 4] )
    self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
    _snake_case = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
    # verify image_id
    _snake_case = torch.tensor([39_769] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
    # verify is_crowd
    _snake_case = torch.tensor([0, 0, 0, 0, 0, 0] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
    # verify class_labels
    _snake_case = torch.tensor([17, 17, 63, 75, 75, 93] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
    # verify masks
    _snake_case = 822_873
    # total number of mask pixels across all six segments
    self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
    # verify orig_size
    _snake_case = torch.tensor([480, 640] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
    # verify size
    _snake_case = torch.tensor([800, 1_066] )
    self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 368 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : str = logging.get_logger(__name__)
def __lowerCAmelCase ( model_name : str ):
    """Build a ``MobileNetVaConfig`` for *model_name* (e.g. ``mobilenet_v1_1.0_224``)
    and attach the ImageNet-1k label mapping.

    BUG FIX: the obfuscated original dropped every computed value into a
    throwaway local (the returned config was never populated) and its dict
    comprehension used the function argument instead of the key ``k``; the
    upstream assignments are restored below.

    Raises ValueError for quantized checkpoints, which are unsupported.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001 )
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""" )
    # Checkpoint names encode the depth multiplier and input resolution.
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""" , model_name )
    if matches:
        config.depth_multiplier = float(matches[1] )
        config.image_size = int(matches[2] )
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1_0_0_1
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    # Shift every ImageNet id by one to make room for the background class.
    idalabel = {int(k ) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def __lowerCAmelCase ( ):
    """Download the standard COCO "two cats" image used to sanity-check
    vision-model conversions and return it as a ``PIL.Image``.

    BUG FIX: the original passed the undefined name ``__UpperCamelCase`` both
    as the URL and as ``stream=``; the upstream url / ``stream=True`` call is
    restored.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int]=False ):
    """Convert a TensorFlow MobileNetV1 checkpoint to HF format, sanity-check
    the logits on a test image, then save (and optionally push) the model and
    image processor.

    NOTE(review): the obfuscation rebinds every local to ``snake_case_`` and
    collapses all four parameters onto one name, so later references
    (``model``, ``logits``, ``model_name`` ...) rely on names this copy never
    defines; code left byte-identical, comments only.
    """
    snake_case_ : int = get_mobilenet_va_config(__UpperCamelCase )
    # Load 🤗 model
    snake_case_ : Optional[Any] = MobileNetVaForImageClassification(__UpperCamelCase ).eval()
    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    snake_case_ : str = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size} , size={"""shortest_edge""": config.image_size + 3_2} , )
    snake_case_ : Dict = image_processor(images=prepare_img() , return_tensors="""pt""" )
    snake_case_ : Any = model(**__UpperCamelCase )
    snake_case_ : Optional[int] = outputs.logits
    # 1001 classes: 1000 ImageNet classes plus the TF "background" class.
    assert logits.shape == (1, 1_0_0_1)
    # Reference logits for the two released checkpoints.
    if model_name == "mobilenet_v1_1.0_224":
        snake_case_ : str = torch.tensor([-4.1_739, -1.1_233, 3.1_205] )
    elif model_name == "mobilenet_v1_0.75_192":
        snake_case_ : List[str] = torch.tensor([-3.9_440, -2.3_141, -0.3_333] )
    else:
        snake_case_ : Union[str, Any] = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3] , __UpperCamelCase , atol=1E-4 )
    Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
    model.save_pretrained(__UpperCamelCase )
    print(F'Saving image processor to {pytorch_dump_folder_path}' )
    image_processor.save_pretrained(__UpperCamelCase )
    if push_to_hub:
        print("""Pushing to the hub...""" )
        snake_case_ : str = """google/""" + model_name
        image_processor.push_to_hub(__UpperCamelCase )
        model.push_to_hub(__UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: parse checkpoint/output paths and run the conversion.
    # NOTE(review): ``convert_movilevit_checkpoint`` is not defined in this
    # file (the conversion function above was renamed by obfuscation) —
    # confirm the intended target before running.
    __lowerCAmelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--model_name''',
        default='''mobilenet_v1_1.0_224''',
        type=str,
        help='''Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.''',
    )
    parser.add_argument(
        '''--checkpoint_path''', required=True, type=str, help='''Path to the original TensorFlow checkpoint (.ckpt file).'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
    )
    parser.add_argument(
        '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
    )
    __lowerCAmelCase : Union[str, Any] = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 58 |
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    """DeeBERT backbone that swaps in RoBERTa embeddings (DeeRoBERTa)."""

    # NOTE(review): both class attributes were collapsed onto one obfuscated
    # name; upstream these are ``config_class`` and ``base_model_prefix``.
    _lowerCamelCase = RobertaConfig
    _lowerCamelCase = '''roberta'''

    def __init__( self , _lowercase ) -> Optional[Any]:
        """Build the DeeBERT model, replace its embeddings with RoBERTa's and
        (re-)initialise the weights."""
        super().__init__(_lowercase )
        snake_case_ : str = RobertaEmbeddings(_lowercase )
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' , SCREAMING_SNAKE_CASE__ , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
    """DeeRoBERTa sequence classifier with highway (early-exit) heads.

    NOTE(review): the obfuscation rebinds every local to ``snake_case_`` and
    collapses class attributes onto one name, so later references
    (``outputs``, ``logits``, ``loss`` ...) rely on names this copy never
    defines; code left byte-identical, comments only.
    """

    _lowerCamelCase = RobertaConfig
    _lowerCamelCase = '''roberta'''

    def __init__( self , _lowercase ) -> List[Any]:
        """Create the DeeRoBERTa backbone plus dropout and a linear classifier."""
        super().__init__(_lowercase )
        snake_case_ : Optional[Any] = config.num_labels
        snake_case_ : Dict = config.num_hidden_layers
        snake_case_ : str = DeeRobertaModel(_lowercase )
        snake_case_ : Dict = nn.Dropout(config.hidden_dropout_prob )
        snake_case_ : List[str] = nn.Linear(config.hidden_size , self.config.num_labels )

    @add_start_docstrings_to_model_forward(_lowercase )
    def UpperCAmelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=-1 , _lowercase=False , ) -> Tuple:
        """Forward pass with early-exit support.

        A ``HighwayException`` raised by an intermediate layer carries that
        layer's outputs and exit index; losses are also computed for every
        highway head so they can be trained jointly.
        """
        snake_case_ : Any = self.num_layers
        try:
            snake_case_ : int = self.roberta(
                _lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , )
            # Pooled output -> dropout -> classification head.
            snake_case_ : str = outputs[1]
            snake_case_ : Union[str, Any] = self.dropout(_lowercase )
            snake_case_ : Tuple = self.classifier(_lowercase )
            snake_case_ : Dict = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An early exit fired: recover its outputs and the exit layer.
            snake_case_ : List[Any] = e.message
            snake_case_ : Union[str, Any] = e.exit_layer
            snake_case_ : Dict = outputs[0]
        if not self.training:
            snake_case_ : Dict = entropy(_lowercase )
            snake_case_ : Optional[int] = []
            snake_case_ : Union[str, Any] = []
        if labels is not None:
            # num_labels == 1 is treated as regression (MSE), otherwise CE.
            if self.num_labels == 1:
                # We are doing regression
                snake_case_ : Dict = MSELoss()
                snake_case_ : Dict = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                snake_case_ : Union[str, Any] = CrossEntropyLoss()
                snake_case_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            snake_case_ : int = []
            for highway_exit in outputs[-1]:
                snake_case_ : Tuple = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(_lowercase )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    snake_case_ : Optional[int] = MSELoss()
                    snake_case_ : Optional[Any] = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    snake_case_ : Optional[int] = CrossEntropyLoss()
                    snake_case_ : Union[str, Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(_lowercase )
            if train_highway:
                snake_case_ : Dict = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                snake_case_ : List[str] = (loss,) + outputs
        if not self.training:
            snake_case_ : Optional[Any] = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                snake_case_ : Tuple = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 58 | 1 |
'''simple docstring'''
import json
import sys
def UpperCAmelCase_ ( input_json_file , output_md_file ):
    """Render a benchmark-results JSON file as a collapsible Markdown table.

    BUG FIX: the obfuscated original declared two parameters with the same
    name (a SyntaxError) and referenced the undefined name ``__snake_case``
    throughout; distinct parameter names are restored and used consistently.

    input_json_file -- path of a JSON file mapping benchmark name ->
        {metric -> {"new": float, "old": float|None, "diff": float|None}}.
    output_md_file  -- path the generated Markdown is written to.
    """
    with open(input_json_file , encoding="utf-8" ) as f:
        results = json.load(f )
    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results ):
        benchmark_res = results[benchmark_name]
        # Use only the file name of the benchmark as the section title.
        benchmark_file_name = benchmark_name.split("/" )[-1]
        output_md.append(F"""### Benchmark: {benchmark_file_name}""" )
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res ):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old" , None )
            dif_val = metric_vals.get("diff" , None )
            # Format each value with fixed-point notation; non-numeric -> "None".
            val_str = F""" {new_val:f}""" if isinstance(new_val , (int, float) ) else "None"
            if old_val is not None:
                val_str += F""" / {old_val:f}""" if isinstance(old_val , (int, float) ) else "None"
            if dif_val is not None:
                val_str += F""" ({dif_val:f})""" if isinstance(dif_val , (int, float) ) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]
    output_md.append("</details>" )
    with open(output_md_file , "w" , encoding="utf-8" ) as f:
        f.writelines("\n".join(output_md ) )
if __name__ == "__main__":
    # BUG FIX: the obfuscated original bound both CLI arguments to the same
    # name and then called the undefined identifier ``format_json_to_md`` with
    # two more undefined names; bind the arguments separately and call the
    # function actually defined above.
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    UpperCAmelCase_(input_json_file, output_md_file)
| 704 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
lowercase = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
lowercase = {
'''abeja/gpt-neox-japanese-2.7b''': 2_048,
}
def UpperCAmelCase_ ( vocab_file , emoji_file ):
    """Load the GPT-NeoX-Japanese vocabulary and emoji table.

    BUG FIX: the obfuscated original declared two parameters with the same
    name (a SyntaxError) and dropped every dict insertion into a throwaway
    local; the upstream behaviour is restored.

    Returns ``(vocab, raw_vocab, ids_to_tokens, emoji)`` where
      vocab         -- token -> id (every spelling of a multi-form entry)
      raw_vocab     -- comma-joined vocab line -> id
      ids_to_tokens -- id -> list of token spellings
      emoji         -- decoded JSON contents of *emoji_file*
    """
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    # A line is either a single token or a comma-separated list of alternative
    # spellings; a line consisting of just "," stands for the comma itself.
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class UpperCAmelCase ( __a):
    """Tokenizer for GPT-NeoX-Japanese, backed by a vocab file and an emoji
    table and delegating the actual splitting to ``SubWordJapaneseTokenizer``.

    NOTE(review): the obfuscation collapsed all method names onto
    ``lowercase_`` (so only the last definition survives at runtime) and
    rebinds locals to ``a_``; code left byte-identical, comments/docstrings
    only.
    """

    # Standard tokenizer class attributes: file names, pretrained maps,
    # positional-embedding sizes and model input names.
    __magic_name__ : Optional[int] = VOCAB_FILES_NAMES
    __magic_name__ : str = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : str = ["input_ids", "attention_mask"]

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_="<|startoftext|>" , lowerCAmelCase_="<|endoftext|>" , lowerCAmelCase_=False , **lowerCAmelCase_ , ) -> List[Any]:
        """Validate the vocab/emoji files, load them and build the sub-word
        tokenizer."""
        super().__init__(
            unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , )
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
                " model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(lowerCAmelCase_):
            raise ValueError(
                f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
                " pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        a_ =do_clean_text
        a_ , a_ , a_ , a_ =load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_)
        a_ =SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji)

    @property
    def lowercase_ ( self) -> int:
        """Vocabulary size (number of distinct raw vocab lines)."""
        return len(self.raw_vocab)

    def lowercase_ ( self) -> Optional[Any]:
        """Return the vocabulary (raw vocab plus added tokens) as a dict."""
        return dict(self.raw_vocab , **self.added_tokens_encoder)

    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Tokenize text via the sub-word tokenizer (optionally cleaned)."""
        return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text)

    def lowercase_ ( self , lowerCAmelCase_) -> Optional[int]:
        """Convert a token string to its id, falling back to the unk token."""
        return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token))

    def lowercase_ ( self , lowerCAmelCase_) -> List[str]:
        """Convert a token id back to its string form."""
        return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_)

    def lowercase_ ( self , lowerCAmelCase_) -> Optional[Any]:
        """Join a list of tokens into a single string."""
        a_ ="".join(lowerCAmelCase_).strip()
        return out_string

    def lowercase_ ( self , lowerCAmelCase_) -> List[int]:
        """Flatten a ``Conversation`` into input ids, eos-separated, keeping
        only the most recent ``model_max_length`` tokens."""
        a_ =[]
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_) + [self.eos_token_id])
        if len(lowerCAmelCase_) > self.model_max_length:
            a_ =input_ids[-self.model_max_length :]
        return input_ids

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_ = None) -> Tuple[str]:
        """Write the vocabulary and emoji table to *save_directory* and return
        both file paths."""
        a_ =0
        if os.path.isdir(lowerCAmelCase_):
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            a_ =os.path.join(
                lowerCAmelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            a_ =(
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                # Warn if ids are not consecutive (corrupted vocabulary).
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        " Please check that the vocabulary is not corrupted!")
                    a_ =token_index
                writer.write(",".join(lowerCAmelCase_) + "\n")
                index += 1
        with open(lowerCAmelCase_ , "w" , encoding="utf-8") as writer:
            json.dump(self.emoji , lowerCAmelCase_)
        return vocab_file, emoji_file
class UpperCAmelCase ( __a):
    """Sub-word tokenizer backing GPT-NeoX-Japanese: normalises raw text
    (URLs, e-mails, phone numbers, dates, prices and box-drawing characters
    are masked), then greedily matches the longest vocabulary entry,
    preferring the smallest token id on ties; unknown characters fall back to
    byte tokens.

    NOTE(review): the obfuscation rebinds every assignment to ``a_`` and
    references parameter names (``vocab`` ...) that do not exist, and the six
    distinct masking patterns were collapsed onto one attribute name; code is
    left byte-identical, only comments/docstrings were added.
    """

    def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_) -> str:
        """Store the vocab / id->token map / emoji table and pre-compile the
        normalisation regexes."""
        a_ =vocab  # same as swe
        a_ =ids_to_tokens  # same as bpe
        a_ =emoji
        # Longest vocabulary entry: bounds the greedy matching window.
        a_ =np.max([len(lowerCAmelCase_) for w in self.vocab.keys()])
        # URL / e-mail / phone-number masking patterns.
        a_ =re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        a_ =re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        a_ =re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        # Western and era-based Japanese date patterns.
        a_ =re.compile(
            r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        a_ =re.compile(
            r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        # Prices in yen/dollar/euro, with thousand separators and tax suffixes.
        a_ =re.compile(
            r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        # Box-drawing and block-element characters are mapped to <BLOCK>.
        a_ ="─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        a_ ="▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        a_ =str.maketrans({k: "<BLOCK>" for k in keisen + blocks})

    def __len__( self) -> Tuple:
        """Number of token ids in the vocabulary."""
        return len(self.ids_to_tokens)

    def lowercase_ ( self , lowerCAmelCase_) -> Any:
        """Mask URLs/e-mails/phone numbers/dates/prices and box characters,
        collapsing repeated <BLOCK> markers.

        NOTE(review): upstream applies six *different* compiled patterns here;
        this copy's obfuscation collapsed them onto ``content_repattera``.
        """
        a_ =self.content_repattera.sub("<URL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<EMAIL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<TEL>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<DATE>" , lowerCAmelCase_)
        a_ =self.content_repattera.sub("<PRICE>" , lowerCAmelCase_)
        a_ =content.translate(self.content_transa)
        while "<BLOCK><BLOCK>" in content:
            a_ =content.replace("<BLOCK><BLOCK>" , "<BLOCK>")
        return content

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_=False) -> Union[str, Any]:
        """Tokenize *text*: normalise whitespace/emoji, optionally clean, then
        greedily match the longest known vocabulary entry at each position."""
        # Normalise special whitespace/line-break characters to markers.
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace(" " , "<SP>")
        a_ =text.replace("\r\n" , "<BR>")
        a_ =text.replace("\n" , "<BR>")
        a_ =text.replace("\r" , "<BR>")
        a_ =text.replace("\t" , "<TAB>")
        a_ =text.replace("—" , "ー")
        a_ =text.replace("−" , "ー")
        # Replace known emoji by their token spelling.
        for k, v in self.emoji["emoji"].items():
            if k in text:
                a_ =text.replace(lowerCAmelCase_ , lowerCAmelCase_)
        if clean:
            a_ =self.clean_text(lowerCAmelCase_)

        def check_simbol(lowerCAmelCase_):
            # True for 2-byte UTF-8 symbols in selected ranges (-> <KIGOU>).
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 2:
                a_ =(int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0xc2a1 and c <= 0xc2bf)
                    or (c >= 0xc780 and c <= 0xc783)
                    or (c >= 0xcab9 and c <= 0xcbbf)
                    or (c >= 0xcc80 and c <= 0xcda2)
                ):
                    return True
            return False

        def checkuae(lowerCAmelCase_):
            # True for 3-byte UTF-8 chars in U+2000..U+2BFF (-> <U2000U2BFF>).
            a_ =x.encode()
            if len(lowerCAmelCase_) == 1 and len(lowerCAmelCase_) == 3:
                a_ =(int(e[0]) << 1_6) + (int(e[1]) << 8) + int(e[2])
                if c >= 0xe2_8080 and c <= 0xe2_b07f:
                    return True
            return False

        a_ =0
        a_ =[]
        while pos < len(lowerCAmelCase_):
            # Special tokens start with "<"; widen the window for them.
            a_ =min(len(lowerCAmelCase_) , pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            a_ =[]  # (token_id, token, pos)
            for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1):
                a_ =text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(lowerCAmelCase_) > 2:
                        a_ =[(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(lowerCAmelCase_) > 0:
                # the smallest token_id is adopted
                a_ , a_ , a_ =sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_: x[0])[0]
                result.append(lowerCAmelCase_)
                a_ =e
            else:
                # No vocab match: emit a symbol/range marker or raw byte tokens.
                a_ =pos + 1
                a_ =text[pos:end]
                if check_simbol(lowerCAmelCase_):
                    result.append("<KIGOU>")
                elif checkuae(lowerCAmelCase_):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                a_ =end
        return result

    def lowercase_ ( self , lowerCAmelCase_ , lowerCAmelCase_="\n") -> List[Any]:
        """Convert token id(s) back to text, re-assembling byte tokens and
        expanding emoji/whitespace markers.

        NOTE(review): upstream iterates ``for index in tokens:`` around the
        body below; that loop header is missing from this copy, so ``index``
        and ``word`` are undefined as written — confirm against upstream.
        """
        a_ =[]
        a_ =[]
        a_ =self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            # Flush any pending byte run before emitting a regular token.
            if len(lowerCAmelCase_) > 0:
                words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
                a_ =[]
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(lowerCAmelCase_)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(lowerCAmelCase_)
        if len(lowerCAmelCase_) > 0:
            words.append(bytearray(lowerCAmelCase_).decode("utf-8" , errors="replace"))
        a_ ="".join(lowerCAmelCase_)
        return text
| 41 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=snake_case_ )
class __magic_name__ (snake_case_ ):
    """Task template for text classification: maps a dataset's text/label
    columns onto the canonical ``text``/``labels`` schema.

    NOTE(review): both methods were collapsed onto one obfuscated name
    (upstream: ``align_with_features`` and the ``column_mapping`` property).
    """

    # Task identifier; always serialised even when left at the default.
    __lowercase : str = field(default='text-classification' ,metadata={'include_in_asdict_even_if_is_default': True} )
    # Expected input schema: a single string column named "text".
    __lowercase : ClassVar[Features] = Features({'text': Value('string' )} )
    # Expected label schema: a ClassLabel column named "labels".
    __lowercase : ClassVar[Features] = Features({'labels': ClassLabel} )
    __lowercase : str = "text"
    __lowercase : str = "labels"

    def SCREAMING_SNAKE_CASE__ ( self:List[str] , _a:str ):
        """Return a copy of this template whose label schema uses the dataset's
        actual ``ClassLabel`` feature; validates the label column exists."""
        if self.label_column not in features:
            raise ValueError(F"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , _a ):
            raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
        snake_case__ = copy.deepcopy(self )
        snake_case__ = self.label_schema.copy()
        snake_case__ = features[self.label_column]
        snake_case__ = label_schema
        return task_template

    @property
    def SCREAMING_SNAKE_CASE__ ( self:int ):
        """Mapping from the dataset's column names to the canonical ones."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 33 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__snake_case = None
try:
import msvcrt
except ImportError:
__snake_case = None
try:
import fcntl
except ImportError:
__snake_case = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
__snake_case = OSError
# Data
# ------------------------------------------------
__snake_case = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__snake_case = '3.0.12'
__snake_case = None
def _lowerCamelCase ( ):
    """Return the module logger, creating it lazily on first use.

    BUG FIX: the obfuscated original assigned the new logger to a throwaway
    local and returned the untouched global, so the function returned ``None``
    until something else set ``_logger``; the global is now updated.
    """
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """Raised when a file lock could not be acquired within the timeout."""

    def __init__( self , lock_file ):
        # BUG FIX: the original assigned an undefined name to a throwaway
        # local, so the path was never stored; ``__str__`` reads
        # ``self.lock_file``, so store it here.
        self.lock_file = lock_file
        return None

    def __str__( self ):
        temp = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
        return temp
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : str = lock
return None
def __enter__( self ) -> List[Any]:
return self.lock
def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
self.lock.release()
return None
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ) -> Optional[Any]:
lowercase__ : List[Any] = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
lowercase__ : Union[str, Any] = self.hash_filename_if_too_long(lowerCamelCase__ , lowerCamelCase__ )
# The path to the lock file.
lowercase__ : int = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
lowercase__ : Dict = None
# The default timeout value.
lowercase__ : Optional[Any] = timeout
# We use this lock primarily for the lock counter.
lowercase__ : Optional[int] = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
lowercase__ : Union[str, Any] = 0
return None
@property
def UpperCAmelCase__( self ) -> List[str]:
return self._lock_file
@property
def UpperCAmelCase__( self ) -> Union[str, Any]:
return self._timeout
@timeout.setter
def UpperCAmelCase__( self , lowerCamelCase__ ) -> Optional[Any]:
lowercase__ : Union[str, Any] = float(lowerCamelCase__ )
return None
def UpperCAmelCase__( self ) -> Tuple:
raise NotImplementedError()
def UpperCAmelCase__( self ) -> Tuple:
raise NotImplementedError()
@property
def UpperCAmelCase__( self ) -> str:
return self._lock_file_fd is not None
def UpperCAmelCase__( self , lowerCamelCase__=None , lowerCamelCase__=0.05 ) -> List[str]:
# Use the default timeout, if no timeout is provided.
if timeout is None:
lowercase__ : int = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
lowercase__ : Tuple = id(self )
lowercase__ : Any = self._lock_file
lowercase__ : Union[str, Any] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(lowerCamelCase__ )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
lowercase__ : Any = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCAmelCase__( self , lowerCamelCase__=False ) -> int:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
lowercase__ : Tuple = id(self )
lowercase__ : int = self._lock_file
logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
lowercase__ : str = 0
logger().debug(F'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self ) -> Dict:
self.acquire()
return self
def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
self.release()
return None
def __del__( self ) -> int:
self.release(force=lowerCamelCase__ )
return None
def UpperCAmelCase__( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
lowercase__ : Optional[int] = os.path.basename(lowerCamelCase__ )
if len(lowerCamelCase__ ) > max_length and max_length > 0:
lowercase__ : Union[str, Any] = os.path.dirname(lowerCamelCase__ )
lowercase__ : List[Any] = str(hash(lowerCamelCase__ ) )
lowercase__ : Optional[int] = filename[: max_length - len(lowerCamelCase__ ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(lowerCamelCase__ , lowerCamelCase__ )
else:
return path
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """Windows file lock based on ``msvcrt.locking``.

    NOTE(review): like the base class, both lock methods below were collapsed
    onto one obfuscated name (upstream ``_acquire``/``_release``), so only the
    last definition survives; code left byte-identical, comments only.
    """

    def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ) -> Tuple:
        """Initialise the base lock and switch to an extended-length path."""
        from .file_utils import relative_to_absolute_path
        super().__init__(lowerCamelCase__ , timeout=lowerCamelCase__ , max_filename_length=lowerCamelCase__ )
        # "\\?\" prefix lifts the MAX_PATH limit on Windows.
        lowercase__ : List[Any] = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )

    def UpperCAmelCase__( self ) -> Tuple:
        """Try to open the lock file and take a non-blocking byte lock; on
        success remember the fd, on failure leave the lock unheld."""
        lowercase__ : Union[str, Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            lowercase__ : Dict = os.open(self._lock_file , lowerCamelCase__ )
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(lowerCamelCase__ , msvcrt.LK_NBLCK , 1 )
            except OSError:
                os.close(lowerCamelCase__ )
            else:
                lowercase__ : Optional[Any] = fd
        return None

    def UpperCAmelCase__( self ) -> List[Any]:
        """Unlock, close the fd and best-effort delete the lock file."""
        lowercase__ : int = self._lock_file_fd
        lowercase__ : Any = None
        msvcrt.locking(lowerCamelCase__ , msvcrt.LK_UNLCK , 1 )
        os.close(lowerCamelCase__ )
        try:
            os.remove(self._lock_file )
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """Unix file lock based on ``fcntl.flock``.

    NOTE(review): both lock methods below were collapsed onto one obfuscated
    name (upstream ``_acquire``/``_release``); code left byte-identical,
    comments only.
    """

    def __init__( self , lowerCamelCase__ , lowerCamelCase__=-1 , lowerCamelCase__=None ) -> List[str]:
        """Cap the file-name length at the filesystem's actual maximum."""
        lowercase__ : Optional[Any] = os.statvfs(os.path.dirname(lowerCamelCase__ ) ).f_namemax
        super().__init__(lowerCamelCase__ , timeout=lowerCamelCase__ , max_filename_length=lowerCamelCase__ )

    def UpperCAmelCase__( self ) -> str:
        """Open the lock file and take a non-blocking exclusive flock; close
        the fd again if the lock is already held elsewhere."""
        lowercase__ : List[Any] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        lowercase__ : List[Any] = os.open(self._lock_file , lowerCamelCase__ )
        try:
            fcntl.flock(lowerCamelCase__ , fcntl.LOCK_EX | fcntl.LOCK_NB )
        except OSError:
            os.close(lowerCamelCase__ )
        else:
            lowercase__ : Any = fd
        return None

    def UpperCAmelCase__( self ) -> str:
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        lowercase__ : Optional[int] = self._lock_file_fd
        lowercase__ : Optional[Any] = None
        fcntl.flock(lowerCamelCase__ , fcntl.LOCK_UN )
        os.close(lowerCamelCase__ )
        return None
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
    """OS-independent fallback lock based on atomic ``O_EXCL`` file creation.

    NOTE(review): both methods carry the same obfuscated name (second shadows
    the first); presumably ``_acquire``/``_release`` upstream.
    """

    def UpperCAmelCase__( self ) -> None:
        # Acquire: O_EXCL makes creation fail if the lock file already exists,
        # which is the whole locking mechanism here.
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file , open_mode )
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def UpperCAmelCase__( self ) -> None:
        # Release: close the fd and delete the lock file.
        os.close(self._lock_file_fd )
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file )
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
# Select the FileLock implementation for the current platform: msvcrt is only
# importable on Windows, fcntl only on POSIX; with neither available, fall
# back to the soft (lock-file based) lock and warn about its weaker guarantees.
# NOTE(review): the class names referenced here do not match the obfuscated
# class definitions above (all `_SCREAMING_SNAKE_CASE`) — an artifact of an
# automated rename; flagged rather than guessed at.
__snake_case = None
if msvcrt:
    __snake_case = WindowsFileLock
elif fcntl:
    __snake_case = UnixFileLock
else:
    __snake_case = SoftFileLock
    # `warnings` may itself be None if its import was guarded earlier in the file.
    if warnings is not None:
        warnings.warn('only soft file lock is available')
'''simple docstring'''
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = 'https://api.github.com'
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '/user'
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('USER_TOKEN', '')


def fetch_github_info(auth_token):
    """Fetch the authenticated user's GitHub profile as a JSON dict.

    Names restored from in-file usages (`BASE_URL`, `USER_TOKEN`,
    `fetch_github_info`, `auth_token` were read but never bound in the
    obfuscated version).
    """
    headers = {
        """Authorization""": F'''token {auth_token}''',
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()


if __name__ == "__main__":  # pragma: no cover
    if USER_TOKEN:
        for key, value in fetch_github_info(USER_TOKEN).items():
            print(F"""{key}: {value}""")
    else:
        raise ValueError('\'USER_TOKEN\' field cannot be empty.')
| 511 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
# Module logger; the name `logger` is read throughout this file (the
# obfuscated version bound it to a throwaway name instead).
logger = logging.getLogger(__name__)
@dataclass
class a_ :
    """A single training/test example for token classification.

    Field names restored from in-file usages (``example.guid``,
    ``example.words``, ``example.labels``); the obfuscated version collapsed
    all three annotations onto one name, leaving a single field.
    """

    guid: str  # unique id for the example
    words: List[str]  # the words of the sequence
    labels: Optional[List[str]]  # per-word labels; may be None for test data
@dataclass
class a_ :
    """Features of one tokenized example, aligned with model input names.

    Field names restored from the keyword arguments at the construction site
    further down this file (``InputFeatures(input_ids=..., attention_mask=...,
    token_type_ids=..., label_ids=...)``).
    """

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None  # omitted for models without segment ids
    label_ids: Optional[List[int]] = None
class a_ ( Enum ):
    """Dataset split identifiers; ``.value`` is used in cache-file names.

    Base restored to the ``Enum`` imported at the top of this file; member
    names restored from ``Split.train`` usages below ('dev'/'test' inferred
    from their values).
    """

    train = 'train'
    dev = 'dev'
    test = 'test'
class a_ :
    """Base class for a token-classification task (e.g. NER).

    Method names restored from the call sites further down this file
    (``read_examples_from_file`` / ``convert_examples_to_features``); in the
    obfuscated version all three methods shared one name, so only the last
    definition survived.  Local names in the long method were restored from
    the statements that still read them (``tokens``, ``label_ids``,
    ``features``, ...).
    """

    @staticmethod
    def read_examples_from_file(data_dir, mode):
        """Read the examples of the given split from `data_dir` (task-specific)."""
        raise NotImplementedError

    @staticmethod
    def get_labels(path):
        """Return the list of labels for the task (task-specific)."""
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples,
        label_list,
        max_seq_length,
        tokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-1_00,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ):
        """Tokenize each example, align per-word labels to sub-tokens, add the
        special tokens and pad everything to `max_seq_length`, returning a
        list of `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list )}
        features = []
        for ex_index, example in enumerate(examples ):
            if ex_index % 1_00_00 == 0:
                logger.info("""Writing example %d of %d""" , ex_index , len(examples ) )
            tokens = []
            label_ids = []
            for word, label in zip(example.words , example.labels ):
                word_tokens = tokenizer.tokenize(word )
                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens ) > 0:
                    tokens.extend(word_tokens )
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens ) - 1) )
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens ) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens )
            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens )
            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids )
            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids )
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids ) == max_seq_length
            assert len(input_mask ) == max_seq_length
            assert len(segment_ids ) == max_seq_length
            assert len(label_ids ) == max_seq_length
            if ex_index < 5:
                logger.info("""*** Example ***""" )
                logger.info("""guid: %s""" , example.guid )
                logger.info("""tokens: %s""" , """ """.join([str(x ) for x in tokens] ) )
                logger.info("""input_ids: %s""" , """ """.join([str(x ) for x in input_ids] ) )
                logger.info("""input_mask: %s""" , """ """.join([str(x ) for x in input_mask] ) )
                logger.info("""segment_ids: %s""" , """ """.join([str(x ) for x in segment_ids] ) )
                logger.info("""label_ids: %s""" , """ """.join([str(x ) for x in label_ids] ) )
            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids , attention_mask=input_mask , token_type_ids=segment_ids , label_ids=label_ids ) )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class a_ ( Dataset ):
    """PyTorch dataset that converts examples to features and caches them on
    disk (guarded by a ``FileLock`` so only one distributed worker builds the
    cache).

    Base restored to the ``Dataset`` imported just above; parameter and
    attribute names restored from the statements that read them (the
    obfuscated version had duplicate parameter names and never assigned
    ``self.features``).
    """

    features: List[InputFeatures]
    # Use the cross-entropy ignore_index as the padding label id so that only
    # real label ids contribute to the loss.
    pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index

    def __init__(
        self,
        token_classification_task,
        data_dir,
        tokenizer,
        labels,
        model_type,
        max_seq_length=None,
        overwrite_cache=False,
        mode=Split.train,
    ):
        # Load data features from cache or dataset file
        cached_features_file = os.path.join(
            data_dir , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(max_seq_length ) ) , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not overwrite_cache:
                logger.info(f'''Loading features from cached file {cached_features_file}''' )
                self.features = torch.load(cached_features_file )
            else:
                logger.info(f'''Creating features from dataset file at {data_dir}''' )
                examples = token_classification_task.read_examples_from_file(data_dir , mode )
                # TODO clean up all this to leverage built-in features of tokenizers
                self.features = token_classification_task.convert_examples_to_features(
                    examples,
                    labels,
                    max_seq_length,
                    tokenizer,
                    cls_token_at_end=bool(model_type in ["""xlnet"""] ),
                    cls_token=tokenizer.cls_token,
                    cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0,
                    sep_token=tokenizer.sep_token,
                    # NOTE(review): this argument was an unresolved placeholder in the
                    # incoming code; False matches the current upstream — confirm.
                    sep_token_extra=False,
                    pad_on_left=bool(tokenizer.padding_side == """left""" ),
                    pad_token=tokenizer.pad_token_id,
                    pad_token_segment_id=tokenizer.pad_token_type_id,
                    pad_token_label_id=self.pad_token_label_id,
                )
                logger.info(f'''Saving features into cached file {cached_features_file}''' )
                torch.save(self.features , cached_features_file )

    def __len__( self ):
        return len(self.features )

    def __getitem__( self , i ) -> InputFeatures:
        # The obfuscated version read an undefined name here.
        return self.features[i]
if is_tf_available():
import tensorflow as tf
class a_ :
    """TensorFlow counterpart of the token-classification dataset: builds the
    features eagerly and exposes them as a ``tf.data.Dataset`` via a generator.

    Names restored from the statements that read them (``self.features``,
    ``self.dataset``, ``gen``); the obfuscated version had duplicate
    parameter names and never assigned the attributes it later read.
    """

    features: List[InputFeatures]
    pad_token_label_id: int = -1_00

    def __init__(
        self,
        token_classification_task,
        data_dir,
        tokenizer,
        labels,
        model_type,
        max_seq_length=None,
        overwrite_cache=False,
        mode=Split.train,
    ):
        examples = token_classification_task.read_examples_from_file(data_dir , mode )
        # TODO clean up all this to leverage built-in features of tokenizers
        self.features = token_classification_task.convert_examples_to_features(
            examples,
            labels,
            max_seq_length,
            tokenizer,
            cls_token_at_end=bool(model_type in ["""xlnet"""] ),
            cls_token=tokenizer.cls_token,
            cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0,
            sep_token=tokenizer.sep_token,
            # NOTE(review): unresolved placeholder in the incoming code; False
            # matches the current upstream — confirm.
            sep_token_extra=False,
            pad_on_left=bool(tokenizer.padding_side == """left""" ),
            pad_token=tokenizer.pad_token_id,
            pad_token_segment_id=tokenizer.pad_token_type_id,
            pad_token_label_id=self.pad_token_label_id,
        )

        def gen():
            # Yield (inputs, labels) pairs; include token_type_ids only when
            # the features carry them.
            for ex in self.features:
                if ex.token_type_ids is None:
                    yield (
                        {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                        ex.label_ids,
                    )
                else:
                    yield (
                        {
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label_ids,
                    )

        # NOTE(review): the incoming code used the non-existent dtype
        # ``tf.intaa``; tf.int32 / tf.int64 match upstream — confirm.
        if "token_type_ids" not in tokenizer.model_input_names:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32}, tf.int64) , (
                    {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
                    tf.TensorShape([None] ),
                ) , )
        else:
            self.dataset = tf.data.Dataset.from_generator(
                gen , ({"""input_ids""": tf.int32, """attention_mask""": tf.int32, """token_type_ids""": tf.int32}, tf.int64) , (
                    {
                        """input_ids""": tf.TensorShape([None] ),
                        """attention_mask""": tf.TensorShape([None] ),
                        """token_type_ids""": tf.TensorShape([None] ),
                    },
                    tf.TensorShape([None] ),
                ) , )

    def lowerCAmelCase__ ( self ):
        # Presumably `get_dataset` upstream (no caller visible here, so the
        # obfuscated name is kept): attach the known cardinality and return
        # the tf.data pipeline.
        self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
        return self.dataset

    def __len__( self ):
        return len(self.features )

    def __getitem__( self , i ) -> InputFeatures:
        return self.features[i]
| 511 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class _UpperCAmelCase ( PipelineTool ):
    """Agent tool that transcribes an audio sample into text with Whisper.

    NOTE(review): class-attribute and method names restored to the
    ``PipelineTool`` contract (the base imported above); in the incoming code
    all seven attributes shared one obfuscated name and all three methods
    shared another, so later definitions shadowed earlier ones.
    """

    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode( self , audio ):
        """Convert raw audio into Whisper input features."""
        return self.pre_processor(audio , return_tensors='''pt''' ).input_features

    def forward( self , inputs ):
        """Run generation on the prepared features."""
        return self.model.generate(inputs=inputs )

    def decode( self , outputs ):
        """Decode the generated token ids into the transcription string."""
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0]
| 231 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure for the ViViT sub-package.  The name
# `_import_structure` is restored from its use in the _LazyModule call below;
# the obfuscated version rebound a throwaway name instead of inserting keys.
_import_structure = {
    '''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''image_processing_vivit'''] = ['''VivitImageProcessor''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_vivit'''] = [
        '''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VivitModel''',
        '''VivitPreTrainedModel''',
        '''VivitForVideoClassification''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_vivit import VivitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vivit import (
            VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            VivitForVideoClassification,
            VivitModel,
            VivitPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules on first attribute access (standard HF __init__ pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 231 | 1 |
'''simple docstring'''
from __future__ import annotations
def A ( electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
    '''
    Solve the mass-action law  n * p = n_i**2  for the one unknown quantity.

    Exactly one of the three concentrations must be 0 (the unknown); the
    function returns a ``(name, value)`` tuple for that quantity and raises
    ``ValueError`` for negative inputs or when the number of zeros is not 1.

    Fixes obfuscation damage: all three parameters shared one name (a
    SyntaxError) while the body read the original names.
    '''
    if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
        raise ValueError('You cannot supply more or less than 2 values' )
    elif electron_conc < 0:
        raise ValueError('Electron concentration cannot be negative in a semiconductor' )
    elif hole_conc < 0:
        raise ValueError('Hole concentration cannot be negative in a semiconductor' )
    elif intrinsic_conc < 0:
        raise ValueError(
            'Intrinsic concentration cannot be negative in a semiconductor' )
    elif electron_conc == 0:
        # n = n_i**2 / p
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        # p = n_i**2 / n
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        # n_i = sqrt(n * p)
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        # Unreachable: with no zero present the first check already raised.
        return (-1, -1)
if __name__ == "__main__":
    import doctest

    # Run the module's doctests when executed as a script.
    doctest.testmod()
| 714 |
'''simple docstring'''
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
# Root logger; the name `logger` is read below when the stream handler is
# attached (the obfuscated version bound it to a throwaway name).
logger = logging.getLogger()
def A ( _UpperCAmelCase : Path ,_UpperCAmelCase : list ) -> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase : List[str] = '\n'.join(_UpperCAmelCase )
Path(_UpperCAmelCase ).open('w' ).writelines(_UpperCAmelCase )
# Tiny model checkpoints used to keep the tests fast.  Names restored from
# their usages below (`model == T5_TINY`, `@parameterized.expand([BART_TINY,
# MBART_TINY])`, `logger.addHandler(stream_handler)`); the obfuscated version
# rebound one throwaway name four times.
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class UpperCamelCase__ ( a ):
    """End-to-end tests for run_eval.py / run_eval_search.py on tiny models.

    NOTE(review): heavy obfuscation damage remains in this class and is
    flagged rather than guessed at: every method is named ``snake_case`` (so
    later defs shadow earlier ones), the base class ``a`` is unresolved
    (presumably TestCasePlus from the imports above), and
    ``SCREAMING_SNAKE_CASE`` is read in several places where a concrete model
    id / parameter name was intended.  Restoring them would require the
    original upstream file.
    """

    def snake_case ( self , SCREAMING_SNAKE_CASE ) -> Dict:
        # Shared helper: run run_eval end-to-end for one model on a one-line
        # input file and check the output file is produced.
        __lowerCAmelCase : Dict = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        __lowerCAmelCase : List[str] = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        __lowerCAmelCase : Any = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
        _dump_articles(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Tuple = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
        # T5 tiny is a translation checkpoint; the others summarize.
        __lowerCAmelCase : Optional[Any] = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        __lowerCAmelCase : Tuple = F"""
            run_eval_search.py
            {model}
            {input_file_name}
            {output_file_name}
            --score_path {score_path}
            --task {task}
            --num_beams 2
            --length_penalty 2.0
        """.split()
        # Patch sys.argv so run_generate() picks up the fake CLI invocation.
        with patch.object(SCREAMING_SNAKE_CASE , 'argv' , SCREAMING_SNAKE_CASE ):
            run_generate()
        assert Path(SCREAMING_SNAKE_CASE ).exists()
        # os.remove(Path(output_file_name))

    def snake_case ( self ) -> int:
        self.run_eval_tester(SCREAMING_SNAKE_CASE )

    # any extra model should go there
    @parameterized.expand([BART_TINY, MBART_TINY] )
    @slow
    def snake_case ( self , SCREAMING_SNAKE_CASE ) -> int:
        self.run_eval_tester(SCREAMING_SNAKE_CASE )

    @parameterized.expand([T5_TINY, MBART_TINY] )
    @slow
    def snake_case ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        # Search over num_beams / length_penalty and check the report that
        # run_search() prints to stdout.
        __lowerCAmelCase : Union[str, Any] = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
        __lowerCAmelCase : int = input_file_name.parent / 'utest_output.txt'
        assert not output_file_name.exists()
        __lowerCAmelCase : Dict = {
            'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
            'de': [
                'Maschinelles Lernen ist großartig, oder?',
                'Ich esse gerne Bananen',
                'Morgen ist wieder ein toller Tag!',
            ],
        }
        __lowerCAmelCase : Dict = Path(self.get_auto_remove_tmp_dir() )
        __lowerCAmelCase : Optional[Any] = str(tmp_dir / 'scores.json' )
        __lowerCAmelCase : Optional[Any] = str(tmp_dir / 'val.target' )
        _dump_articles(SCREAMING_SNAKE_CASE , text['en'] )
        _dump_articles(SCREAMING_SNAKE_CASE , text['de'] )
        __lowerCAmelCase : Dict = 'translation_en_to_de' if model == T5_TINY else 'summarization'
        __lowerCAmelCase : Any = F"""
            run_eval_search.py
            {model}
            {str(SCREAMING_SNAKE_CASE )}
            {str(SCREAMING_SNAKE_CASE )}
            --score_path {score_path}
            --reference_path {reference_path}
            --task {task}
        """.split()
        testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
        with patch.object(SCREAMING_SNAKE_CASE , 'argv' , SCREAMING_SNAKE_CASE ):
            with CaptureStdout() as cs:
                run_search()
        # The report must contain the header, the model id and the winning
        # args, plus the metric names; "Info" lines must be filtered out.
        __lowerCAmelCase : List[str] = [' num_beams | length_penalty', model, 'Best score args']
        __lowerCAmelCase : List[Any] = ['Info']
        if "translation" in task:
            expected_strings.append('bleu' )
        else:
            expected_strings.extend(SCREAMING_SNAKE_CASE )
        for w in expected_strings:
            assert w in cs.out
        for w in un_expected_strings:
            assert w not in cs.out
        assert Path(SCREAMING_SNAKE_CASE ).exists()
        os.remove(Path(SCREAMING_SNAKE_CASE ) )
| 123 | 0 |
'''simple docstring'''
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class UpperCAmelCase :
    """Fixture that builds a tiny GPT-J config plus random inputs and checks
    the use-cache (incremental decoding) forward paths.

    NOTE(review): this tester is damaged by an automated rename — every local
    and every ``self.<attr>`` assignment was collapsed onto ``_UpperCamelCase``
    while later lines still read the original names (``self.batch_size``,
    ``config_and_inputs``, ``input_ids``, ...).  Restoring it would require
    the upstream file; flagged rather than guessed at.
    """

    def __init__( self : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=14 , UpperCamelCase__ : List[str]=7 , UpperCamelCase__ : Tuple=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Union[str, Any]=4 , UpperCamelCase__ : str=4 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : int=37 , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : str=0.1 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : List[Any]=0.02 , ) -> Optional[Any]:
        # Intended per-attribute stores (self.parent, self.batch_size, ...)
        # collapsed onto one name by the rename:
        _UpperCamelCase =parent
        _UpperCamelCase =batch_size
        _UpperCamelCase =seq_length
        _UpperCamelCase =is_training
        _UpperCamelCase =use_input_mask
        _UpperCamelCase =use_token_type_ids
        _UpperCamelCase =use_labels
        _UpperCamelCase =vocab_size
        _UpperCamelCase =hidden_size
        _UpperCamelCase =rotary_dim
        _UpperCamelCase =num_hidden_layers
        _UpperCamelCase =num_attention_heads
        _UpperCamelCase =intermediate_size
        _UpperCamelCase =hidden_act
        _UpperCamelCase =hidden_dropout_prob
        _UpperCamelCase =attention_probs_dropout_prob
        _UpperCamelCase =max_position_embeddings
        _UpperCamelCase =initializer_range
        _UpperCamelCase =None
        # bos/eos/pad token ids all use the last vocab slot in this tiny setup.
        _UpperCamelCase =vocab_size - 1
        _UpperCamelCase =vocab_size - 1
        _UpperCamelCase =vocab_size - 1

    def UpperCamelCase__ ( self : Dict ) -> Any:
        # Build random input ids, an optional attention mask, and a tiny
        # GPTJConfig from the stored hyper-parameters.
        _UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _UpperCamelCase =None
        if self.use_input_mask:
            _UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
        _UpperCamelCase =GPTJConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCamelCase__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
        return (config, input_ids, input_mask)

    def UpperCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
        # Repackage the config/inputs tuple as the dict form used by the
        # common-test mixin.
        _UpperCamelCase =self.prepare_config_and_inputs()
        _UpperCamelCase , _UpperCamelCase , _UpperCamelCase =config_and_inputs
        _UpperCamelCase ={'''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def UpperCamelCase__ ( self : str , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ) -> Dict:
        # Check that decoding with an init_cache'd past (all-but-last token,
        # then the last token) matches a single full forward pass.
        _UpperCamelCase =20
        _UpperCamelCase =model_class_name(UpperCamelCase__ )
        _UpperCamelCase =model.init_cache(input_ids.shape[0] , UpperCamelCase__ )
        _UpperCamelCase =jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        _UpperCamelCase =jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        _UpperCamelCase =model(
            input_ids[:, :-1] , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , position_ids=UpperCamelCase__ , )
        _UpperCamelCase =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _UpperCamelCase =model(
            input_ids[:, -1:] , attention_mask=UpperCamelCase__ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCamelCase__ , )
        _UpperCamelCase =model(UpperCamelCase__ )
        _UpperCamelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )

    def UpperCamelCase__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ) -> List[str]:
        # Same cache check, but with an explicit attention mask padded out to
        # the maximum decoder length.
        _UpperCamelCase =20
        _UpperCamelCase =model_class_name(UpperCamelCase__ )
        _UpperCamelCase =jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
        _UpperCamelCase =model.init_cache(input_ids.shape[0] , UpperCamelCase__ )
        _UpperCamelCase =jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
        _UpperCamelCase =model(
            input_ids[:, :-1] , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ , position_ids=UpperCamelCase__ , )
        _UpperCamelCase =jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='''i4''' )
        _UpperCamelCase =model(
            input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCamelCase__ , position_ids=UpperCamelCase__ , )
        _UpperCamelCase =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        _UpperCamelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
@require_flax
class UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase):
    """Flax GPT-J model tests: cache-forward checks, a slow batched-generation
    test, and PyTorch<->Flax weight-conversion equivalence tests.

    NOTE(review): same automated-rename damage as the tester above — the
    mixin base names (``lowercase_``) are unresolved, and many
    ``_UpperCamelCase`` assignments hide the original locals that later lines
    still read (``prepared_inputs_dict``, ``pt_inputs``, ``fx_state``, ...).
    Flagged rather than guessed at.
    """

    lowerCAmelCase_ = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    lowerCAmelCase_ = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def UpperCamelCase__ ( self : Any ) -> List[Any]:
        # Set up the shared model tester fixture.
        _UpperCamelCase =FlaxGPTJModelTester(self )

    def UpperCamelCase__ ( self : Union[str, Any] ) -> Tuple:
        # Cache-forward equivalence for every model class.
        for model_class_name in self.all_model_classes:
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase =self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    def UpperCamelCase__ ( self : Dict ) -> List[str]:
        # Cache-forward equivalence with an explicit attention mask.
        for model_class_name in self.all_model_classes:
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase =self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

    @tooslow
    def UpperCamelCase__ ( self : int ) -> str:
        # Batched greedy generation with the full 6B checkpoint, compared
        # against fixed expected strings (hence @tooslow).
        _UpperCamelCase =GPTaTokenizer.from_pretrained('''gpt2''' , pad_token='''<|endoftext|>''' , padding_side='''left''' )
        _UpperCamelCase =tokenizer(['''Hello this is a long string''', '''Hey'''] , return_tensors='''np''' , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )
        _UpperCamelCase =FlaxGPTJForCausalLM.from_pretrained('''EleutherAI/gpt-j-6B''' )
        _UpperCamelCase =False
        _UpperCamelCase =model.config.eos_token_id
        _UpperCamelCase =jax.jit(model.generate )
        _UpperCamelCase =jit_generate(
            inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , pad_token_id=tokenizer.pad_token_id ).sequences
        _UpperCamelCase =tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        _UpperCamelCase =[
            '''Hello this is a long string of text.\n\nI\'m trying to get the text of the''',
            '''Hey, I\'m a little late to the party. I\'m going to''',
        ]
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )

    @is_pt_flax_cross_test
    def UpperCamelCase__ ( self : Dict ) -> List[Any]:
        # PyTorch -> Flax: convert a randomly-initialized PT model's state
        # dict to Flax and check outputs match, including after a
        # save_pretrained/from_pretrained round trip.
        _UpperCamelCase , _UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                _UpperCamelCase =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
                _UpperCamelCase ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                _UpperCamelCase =model_class.__name__[4:]  # Skip the "Flax" at the beginning
                _UpperCamelCase =getattr(UpperCamelCase__ , UpperCamelCase__ )

                _UpperCamelCase , _UpperCamelCase =pt_inputs['''input_ids'''].shape
                _UpperCamelCase =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                # Force a deterministic attention-mask pattern per batch row.
                for batch_idx, start_index in enumerate(UpperCamelCase__ ):
                    _UpperCamelCase =0
                    _UpperCamelCase =1
                    _UpperCamelCase =0
                    _UpperCamelCase =1
                _UpperCamelCase =pt_model_class(UpperCamelCase__ ).eval()
                _UpperCamelCase =model_class(UpperCamelCase__ , dtype=jnp.floataa )
                _UpperCamelCase =convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCamelCase__ )
                _UpperCamelCase =fx_state
                with torch.no_grad():
                    _UpperCamelCase =pt_model(**UpperCamelCase__ ).to_tuple()
                _UpperCamelCase =fx_model(**UpperCamelCase__ ).to_tuple()
                self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(UpperCamelCase__ )
                    _UpperCamelCase =model_class.from_pretrained(UpperCamelCase__ , from_pt=UpperCamelCase__ )
                _UpperCamelCase =fx_model_loaded(**UpperCamelCase__ ).to_tuple()
                self.assertEqual(
                    len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output_loaded, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
                    self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )

    @is_pt_flax_cross_test
    def UpperCamelCase__ ( self : List[str] ) -> Tuple:
        # Flax -> PyTorch: load Flax weights into a PT model and check outputs
        # match, including after a save/from_flax round trip.
        _UpperCamelCase , _UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                # prepare inputs
                _UpperCamelCase =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
                _UpperCamelCase ={k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                _UpperCamelCase =model_class.__name__[4:]  # Skip the "Flax" at the beginning
                _UpperCamelCase =getattr(UpperCamelCase__ , UpperCamelCase__ )
                _UpperCamelCase =pt_model_class(UpperCamelCase__ ).eval()
                _UpperCamelCase =model_class(UpperCamelCase__ , dtype=jnp.floataa )
                _UpperCamelCase =load_flax_weights_in_pytorch_model(UpperCamelCase__ , fx_model.params )
                _UpperCamelCase , _UpperCamelCase =pt_inputs['''input_ids'''].shape
                _UpperCamelCase =np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
                # Force a deterministic attention-mask pattern per batch row.
                for batch_idx, start_index in enumerate(UpperCamelCase__ ):
                    _UpperCamelCase =0
                    _UpperCamelCase =1
                    _UpperCamelCase =0
                    _UpperCamelCase =1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    _UpperCamelCase =pt_model(**UpperCamelCase__ ).to_tuple()
                _UpperCamelCase =fx_model(**UpperCamelCase__ ).to_tuple()
                self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(UpperCamelCase__ )
                    _UpperCamelCase =pt_model_class.from_pretrained(UpperCamelCase__ , from_flax=UpperCamelCase__ )
                with torch.no_grad():
                    _UpperCamelCase =pt_model_loaded(**UpperCamelCase__ ).to_tuple()
                self.assertEqual(
                    len(UpperCamelCase__ ) , len(UpperCamelCase__ ) , '''Output lengths differ between Flax and PyTorch''' )
                for fx_output, pt_output in zip(UpperCamelCase__ , UpperCamelCase__ ):
                    self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )

    @tooslow
    def UpperCamelCase__ ( self : Optional[int] ) -> List[str]:
        # Smoke test: the public 6B checkpoint loads and runs a 1x1 forward.
        for model_class_name in self.all_model_classes:
            _UpperCamelCase =model_class_name.from_pretrained('''EleutherAI/gpt-j-6B''' )
            _UpperCamelCase =model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
# ---- non-code residue (dataset chunk separator) ----
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class __lowerCAmelCase ( unittest.TestCase , ToolTesterMixin ):
    """Exercises the ``text-question-answering`` tool, both locally and remotely.

    Fixes: the mixin base was an undefined placeholder (``ToolTesterMixin`` is
    imported above), the tools were bound to throwaway locals instead of
    ``self.tool``/``self.remote_tool``, the text argument was an undefined
    placeholder (the module constant ``lowercase_`` holds the context), and all
    methods shared one mangled name so only the last survived.
    """

    def setUp(self) -> None:
        # unittest only runs fixtures/tests with conventional names.
        self.tool = load_tool('text-question-answering' )
        self.tool.setup()
        self.remote_tool = load_tool('text-question-answering' , remote=True )

    def test_exact_match_arg(self) -> None:
        result = self.tool(lowercase_ , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )

    def test_exact_match_arg_remote(self) -> None:
        result = self.remote_tool(lowercase_ , 'What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )

    def test_exact_match_kwarg(self) -> None:
        result = self.tool(text=lowercase_ , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )

    def test_exact_match_kwarg_remote(self) -> None:
        result = self.remote_tool(text=lowercase_ , question='What did Hugging Face do in April 2021?' )
        self.assertEqual(result , 'launched the BigScience Research Workshop' )
# ---- non-code residue (dataset chunk separator) ----
import os
from collections.abc import Iterator
def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    """Yield relative paths of project ``.py``/``.ipynb`` files under *top_dir*.

    Skips the ``scripts`` directory, hidden/private directories (leading ``.``
    or ``_``) and ``__init__.py`` files.

    Fixes: the body referenced an undefined ``_lowercase`` instead of the
    parameter, the def name shadowed three sibling defs while the caller uses
    ``good_file_paths``, and ``lstrip("./")`` stripped *any* leading ``.``/``/``
    characters (it is a character set, not a prefix), which would also eat
    leading dots of hidden filenames.
    """
    for dir_path, dir_names, filenames in os.walk(top_dir):
        # Prune in place so os.walk does not descend into skipped directories.
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                path = os.path.join(dir_path, filename)
                # Strip only a literal "./" prefix, not a character set.
                if path.startswith("./"):
                    path = path[2:]
                yield path
def md_prefix(i: int) -> str:
    """Return the markdown bullet prefix for nesting depth *i*.

    Depth 0 starts a new top-level section (``\\n##``); deeper levels are
    indented ``*`` bullets.

    Fixes: the body referenced ``i`` while the parameter was a mangled
    placeholder, and the def name shadowed siblings while the caller uses
    ``md_prefix``.
    """
    return f'''{i * " "}*''' if i else "\n##"
def print_path(old_path: str, new_path: str) -> str:
    """Print markdown headers for the path components of *new_path* that differ
    from *old_path*, then return *new_path* (the caller's new "current path").

    Fixes: the original signature had two parameters with the same mangled name
    (a SyntaxError), the body mixed parameter placeholders, and the def name
    shadowed siblings while the caller uses ``print_path``.
    """
    old_parts = old_path.split(os.sep )
    for i, new_part in enumerate(new_path.split(os.sep ) ):
        # Only announce components that are new relative to the previous path.
        if (i + 1 > len(old_parts ) or old_parts[i] != new_part) and new_part:
            print(f'''{md_prefix(i )} {new_part.replace("_" , " " ).title()}''' )
    return new_path
def print_directory_md(top_dir: str = ".") -> None:
    """Print a markdown table of contents for all good files beneath *top_dir*.

    Fixes: the def was mangled to shadow its siblings while the final line calls
    ``print_directory_md``; helper calls used placeholder names; and the
    filename interpolations had been replaced by a literal ``(unknown)``
    placeholder — the URL and link text must use the actual file name.
    """
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir ) ):
        filepath, filename = os.path.split(filepath )
        if filepath != old_path:
            old_path = print_path(old_path , filepath )
        # Nesting depth: number of separators plus one, or 0 at the repo root.
        indent = (filepath.count(os.sep ) + 1) if filepath else 0
        url = f'''{filepath}/{filename}'''.replace(" " , "%20" )
        filename = os.path.splitext(filename.replace("_" , " " ).title() )[0]
        print(f'''{md_prefix(indent )} [{filename}]({url})''' )


if __name__ == "__main__":
    print_directory_md(".")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
# Lazy-import table mapping submodule name -> public names it provides.
# Fixes: the dict and all its conditional additions had been collapsed into
# repeated rebindings of one variable, so `_import_structure` (referenced by
# the `_LazyModule` call at the bottom) was never defined, and the lazy module
# was never installed into `sys.modules`.
_import_structure = {
    'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_beit'] = ['BeitFeatureExtractor']
    _import_structure['image_processing_beit'] = ['BeitImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_beit'] = [
        'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BeitForImageClassification',
        'BeitForMaskedImageModeling',
        'BeitForSemanticSegmentation',
        'BeitModel',
        'BeitPreTrainedModel',
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_beit'] = [
        'FlaxBeitForImageClassification',
        'FlaxBeitForMaskedImageModeling',
        'FlaxBeitModel',
        'FlaxBeitPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers; runtime stays lazy.
    from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_beit import BeitFeatureExtractor
        from .image_processing_beit import BeitImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_beit import (
            BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BeitForImageClassification,
            BeitForMaskedImageModeling,
            BeitForSemanticSegmentation,
            BeitModel,
            BeitPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_beit import (
            FlaxBeitForImageClassification,
            FlaxBeitForMaskedImageModeling,
            FlaxBeitModel,
            FlaxBeitPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    """Builds a tiny random ``BertConfig`` plus matching dummy inputs for the
    Flax BERT test suite.

    Fixes: the ``__init__`` signature repeated one mangled parameter name (a
    SyntaxError), three methods shared one mangled name so only the last
    survived, internal calls referenced the undefined
    ``self.prepare_config_and_inputs``, and the class name is restored to
    ``FlaxBertModelTester`` because the test class below instantiates it by
    that name.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, token_type_ids, attention_mask)``."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the mangled source passed a placeholder here; the
            # decoder variant below flips is_decoder to True, so False is the
            # encoder default — confirm against upstream.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Adapter: same config, inputs packed into the dict the common tests expect."""
        config, input_ids, token_type_ids, attention_mask = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: flips ``is_decoder`` and adds encoder-side tensors."""
        config, input_ids, token_type_ids, attention_mask = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class _snake_case ( FlaxModelTesterMixin , unittest.TestCase ):
    """Flax BERT model test wiring; the mixin supplies the actual test methods.

    Fixes: the mixin base was an undefined placeholder (``FlaxModelTesterMixin``
    is imported above), the two class attributes shared one mangled name,
    ``FlaxBertForQuestionAnswering`` appeared twice in the model tuple, both
    methods shared one mangled name (so ``setUp`` never ran), the tester was
    never assigned to ``self.model_tester``, and the loaded model was bound to
    a throwaway local while the call used the undefined name ``model``.
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("""bert-base-cased""")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
'''simple docstring'''
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes strictly below *n* (Project Euler 10),
    using a sieve of Eratosthenes.

    Fixes: the inner marking loop stepped by ``n`` instead of the prime ``i``
    (so composites were never marked), the ``primality_list[0] = 1`` /
    ``primality_list[1] = 1`` seeds had been collapsed into plain variable
    rebindings, and the def name shadowed the ``solution()`` call below.
    Also guards tiny ``n`` where the seed indices would be out of range.
    """
    if n < 3:
        return 0
    # is_composite[i] == 1 marks i as non-prime; 0 and 1 are not primes.
    is_composite = [0] * (n + 1)
    is_composite[0] = 1
    is_composite[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if is_composite[i] == 0:
            # Step by the prime i to mark every multiple from i*i upward.
            for j in range(i * i, n + 1, i):
                is_composite[j] = 1
    total = 0
    for i in range(n):
        if is_composite[i] == 0:
            total += i
    return total


if __name__ == "__main__":
    print(f"""{solution() = }""")
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCamelCase_ :
    """Abstract base for generation streamers.

    Subclasses receive batches of token ids through :meth:`put` during
    generation and are told generation finished through :meth:`end`.

    Fixes: both abstract methods shared one mangled name ``A``, so the second
    definition silently shadowed the first and subclasses could not override
    the intended hooks.
    """

    def put(self, value):
        """Consume a new batch of token ids pushed by the generator."""
        raise NotImplementedError()

    def end(self):
        """Signal that generation has finished."""
        raise NotImplementedError()
class lowerCamelCase_ ( lowerCAmelCase__ ):
    """Streamer that decodes generated tokens and prints text as it arrives.

    NOTE(review): the base-class name ``lowerCAmelCase__`` is not defined in
    this module — presumably it should be the streamer base class defined just
    above; confirm against the original source.

    Fixes: four methods shared the mangled name ``A`` (only the last survived),
    and ``put`` called the then-undefined ``self._is_chinese_char``; the
    canonical names are restored.
    """

    def __init__(self, tokenizer, skip_prompt=False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receive new token ids (batch size 1 only), decode, and emit any
        newly printable text via ``on_finalized_text``."""
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape ) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )

        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(printable_text )

        self.on_finalized_text(printable_text )

    def end(self):
        """Flush any remaining cached text and mark the stream finished."""
        if len(self.token_cache ) > 0:
            text = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text , stream_end=True )

    def on_finalized_text(self, text, stream_end=False):
        """Default sink: print to stdout; newline only at stream end."""
        print(text , flush=True , end='''''' if not stream_end else None )

    def _is_chinese_char(self, cp):
        """Return True if codepoint *cp* falls in a CJK Unicode block."""
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)  #
            or (cp >= 0x20000 and cp <= 0x2A6DF)  #
            or (cp >= 0x2A700 and cp <= 0x2B73F)  #
            or (cp >= 0x2B740 and cp <= 0x2B81F)  #
            or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
        ):  #
            return True
        return False
class lowerCamelCase_ ( lowerCAmelCase__ ):
    """Streamer variant that pushes decoded text onto a queue, making the
    stream consumable with a plain ``for`` loop from another thread.

    NOTE(review): the base-class name ``lowerCAmelCase__`` is not defined in
    this module — presumably it should be the text streamer defined above,
    whose ``on_finalized_text`` this class overrides; confirm.

    Fixes: the ``on_finalized_text`` override and the iterator's ``__next__``
    were both mangled to ``A``, which broke iteration entirely (no
    ``__next__`` existed).
    """

    def __init__(self, tokenizer, skip_prompt=False, timeout=None, **decode_kwargs):
        super().__init__(tokenizer , skip_prompt , **decode_kwargs )
        self.text_queue = Queue()
        # None doubles as the end-of-stream sentinel placed on the queue.
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text, stream_end=False):
        """Queue the finalized text; also queue the stop sentinel at stream end."""
        self.text_queue.put(text , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
# ---- non-code residue (dataset chunk separator) ----
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=DummyObject ):
    """Import-time placeholder that raises a helpful error when the ``onnx``
    backend is not installed.

    Fixes: the metaclass was an undefined placeholder (``DummyObject`` is
    imported above), the backend-list attribute name was mangled (the dummy
    machinery conventionally reads ``_backends`` — confirm against
    ``..utils.DummyObject``), and both classmethods shared one mangled name.
    """

    _backends = ['''onnx''']

    def __init__(self, *args, **kwargs):
        requires_backends(self , ['''onnx'''] )

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls , ['''onnx'''] )

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls , ['''onnx'''] )
# ---- non-code residue (dataset chunk separator) ----
import argparse
from collections import defaultdict
import yaml
a_ = 'docs/source/en/_toctree.yml'
def _a ( UpperCamelCase_ : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ = defaultdict(lowercase__ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({"local": doc["local"], "title": doc["title"]} )
else:
new_doc_list.append(lowercase__ )
lowerCAmelCase__ = new_doc_list
lowerCAmelCase__ = [key for key, value in counts.items() if value > 1]
lowerCAmelCase__ = []
for duplicate_key in duplicates:
lowerCAmelCase__ = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key} )
if len(lowercase__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1] )
lowerCAmelCase__ = sorted(lowercase__ , key=lambda UpperCamelCase_ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(lowercase__ ) > 1:
raise ValueError("{doc_list} has two 'overview' docs which is not allowed." )
overview_doc.extend(lowercase__ )
# Sort
return overview_doc
def check_scheduler_doc(overwrite: bool = False) -> None:
    """Verify the Schedulers section of the toctree is clean and sorted;
    rewrite the file when *overwrite* is True, otherwise raise on drift.

    Fixes: the body referenced the undefined placeholder ``lowercase__`` (for
    the toctree path, the YAML payload, and the overwrite flag) and the def
    name shadowed siblings while the entry point calls ``check_scheduler_doc``.
    ``a_`` is the module-level toctree path constant.
    """
    with open(a_ , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc )

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(a_ , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
def check_pipeline_doc(overwrite: bool = False) -> None:
    """Verify the Pipelines section of the toctree (including nested
    sub-sections) is clean and sorted; rewrite when *overwrite* is True,
    otherwise raise on drift.

    Fixes: as with ``check_scheduler_doc``, the undefined ``lowercase__``
    placeholders are replaced with the intended locals, and the def name is
    restored to the one the entry point calls. ``a_`` is the module-level
    toctree path constant.
    """
    with open(a_ , encoding="utf-8" ) as f:
        content = yaml.safe_load(f.read() )

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc )
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc )

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs )

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(a_ , "w" , encoding="utf-8" ) as f:
                f.write(yaml.dump(content , allow_unicode=True ) )
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
    # Fixes: the parser, the add_argument call and the parsed args were all
    # bound to / read from the single mangled name ``a_`` (which also clobbered
    # the toctree path constant); distinct locals and a main guard restore the
    # intended CLI behavior.
    parser = argparse.ArgumentParser()
    parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
# ---- non-code residue (dataset chunk separator) ----
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[torch.Size]:
    """Recursively collect the ``.shape`` of every tensor in a nested
    dict/list/tuple tree, in traversal order.

    Fixes: the body referenced the undefined name ``tree`` while the parameter
    was a mangled placeholder, the dict check compared the argument against
    itself, and the def name shadowed sibling defs while internal callers use
    ``_fetch_dims``.
    """
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("""Not supported""")
    return shapes
@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    """Convert a flat (row-major) index into a multi-dimensional index for a
    tensor whose leading dimensions are *dims*.

    Fixes: the dims argument and the running quotient were both mangled to one
    placeholder, and the def name shadowed siblings while internal callers use
    ``_flat_idx_to_idx``.
    """
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    """Return a minimal ordered list of slice tuples that together cover every
    flat index between the multi-dimensional indices *start* and *end*
    (both inclusive) of a tensor with leading batch shape *dims*.

    Fixes: the original signature repeated one mangled parameter name (a
    SyntaxError) and every local had been collapsed into one placeholder;
    the logic is restored with distinct names.
    """
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        # An entry only counts as an edge if every later dimension is an edge too.
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None
        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    """Equivalent to ``t.reshape(-1, ...)[flat_start:flat_end]`` but without
    materializing a flattened copy: gathers a minimal set of slices over the
    first *no_batch_dims* dimensions and concatenates them.

    Fixes: locals were collapsed to one placeholder and the def name shadowed
    siblings while the caller (``chunk_layer``'s low-memory path) uses
    ``_chunk_slice``.
    """
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    """Apply *layer* to *inputs* in chunks of *chunk_size* along the flattened
    leading *no_batch_dims* batch dimensions, reassembling the outputs into a
    single tensor tree with the original batch shape.

    Fixes: the original signature repeated one mangled parameter name (a
    SyntaxError), all locals were collapsed into one placeholder, and the
    per-chunk output writes had degenerated into plain rebindings; the
    chunking logic is restored with distinct names.

    NOTE(review): output-assignment details (``+=`` vs ``=`` under
    ``_add_into_out``) reconstructed from the surviving control flow — confirm
    against the upstream chunk utilities.
    """
    if not (len(inputs) > 0):
        raise ValueError("""Must provide at least one input""")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        # Broadcast every input up to the common batch shape; flatten unless
        # running in low-memory mode (where flattening would force a copy).
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d
    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        # Size-1 leading dims were left unexpanded in low-mem mode; keep them.
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("""Not supported""")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)
    return out
class __snake_case :
    """Heuristically tunes (and caches) the largest viable ``chunk_size`` for
    chunked inference, re-tuning only when the argument shapes change.

    Fixes: all three methods shared the mangled name ``__a`` (only the last
    survived) while internal calls use ``_determine_favorable_chunk_size`` and
    ``_compare_arg_caches``; the sort-key lambdas took a mangled parameter but
    read ``x``; and the binary-search state updates had been collapsed into
    placeholder rebindings.
    """

    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        # Last tuned chunk size, and the argument "shape fingerprint" it was tuned for.
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size):
        """Binary-search the power-of-two candidates for the largest chunk
        size at which ``fn(*args, chunk_size=...)`` still succeeds."""
        logging.info("""Tuning chunk size...""")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        # Nudge the largest candidate so max_chunk_size itself is testable.
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                # Presumably an out-of-memory failure: candidate not viable.
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        """Deep-compare two argument fingerprints (nested lists/tuples/dicts)."""
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                # Compare dict values in a deterministic key order.
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size) -> int:
        """Return a chunk size for *args*, re-tuning only when the cached
        fingerprint no longer matches."""
        consistent = True
        # Replace tensors by their shapes so the cache compares cheaply.
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
# ---- non-code residue (dataset chunk separator) ----
"""Polynomial-regression demo on the position-salaries dataset.

Fixes: every module-level binding (dataset, X, y, the train/test split, the
polynomial features and the regressor) had been collapsed into one mangled
name, the plotting function referenced undefined placeholders, and the entry
point called the then-undefined ``viz_polymonial``.
"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split

# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures

# Importing the dataset
dataset = pd.read_csv(
    '''https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'''
    '''position_salaries.csv'''
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

# NOTE(review): the split is unused below; kept for parity with the original script.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


def viz_polymonial() -> None:
    """Scatter the raw data and overlay the fitted polynomial curve."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polymonial()

    # Predicting a new result with Polymonial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
# ---- non-code residue (dataset chunk separator) ----
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
    "split_dict" , [
        SplitDict(),
        SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
        SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
        SplitDict({"train": SplitInfo()} ),
    ] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict) -> None:
    """Round-trip a SplitDict through its YAML-list representation.

    Fixes: intermediate results were bound to throwaway mangled locals, the
    length assertion compared the same placeholder to itself, and the per-split
    attribute updates had been collapsed into plain rebindings instead of
    ``split_info.dataset_name`` / ``split_info.name`` assignments.
    """
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list ) == len(split_dict )
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list )
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info" , [SplitInfo(), SplitInfo(dataset_name=None ), SplitInfo(dataset_name="my_dataset" )] )
def test_split_dict_asdict_has_dataset_name(split_info: SplitInfo) -> None:
    """``asdict`` must keep the (deprecated) ``dataset_name`` field.

    Fixes: one parametrized case passed an undefined placeholder as
    ``dataset_name`` (the intended value is ``None``), and the def name
    collided with the sibling test.
    """
    split_dict_asdict = asdict(SplitDict({"train": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
# ---- non-code residue (dataset chunk separator) ----
def a_ ( SCREAMING_SNAKE_CASE__ : int ):
    """Return the binary digits of a non-negative integer as a string.

    The original recursed through `binary_recursive`, a name that does not
    exist in this module (the function itself was renamed), so every call
    raised NameError. This version is self-contained and iterative, with the
    same results: e.g. 0 -> "0", 1 -> "1", 5 -> "101".
    """
    value = int(SCREAMING_SNAKE_CASE__)
    if value in (0, 1):  # base cases: a single binary digit
        return str(value)
    digits = ''
    while value:
        value, remainder = divmod(value, 2)
        digits = str(remainder) + digits
    return digits
def a_ ( SCREAMING_SNAKE_CASE__ : str ):
    """Convert an integer string (optionally negative) to a "0b..." literal.

    Raises ValueError for empty or non-integer input. The original delegated
    to `binary_recursive`, which is undefined in this module; the conversion
    is now done inline so the function works on its own.
    """
    number = str(SCREAMING_SNAKE_CASE__).strip()
    if not number:
        raise ValueError('No input value was provided' )
    negative = '-' if number.startswith('-' ) else ''
    number = number.lstrip('-' )
    if not number.isnumeric():
        raise ValueError('Input value is not an integer' )
    # Build the binary digit string of the absolute value.
    value = int(number)
    if value in (0, 1):
        digits = str(value)
    else:
        digits = ''
        while value:
            value, remainder = divmod(value, 2)
            digits = str(remainder) + digits
    return F'''{negative}0b{digits}'''
if __name__ == "__main__":
    # Run the embedded doctests when this module is executed as a script.
    import doctest

    doctest.testmod()
| 464 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
lowerCamelCase = logging.get_logger(__name__)
if is_vision_available():
import PIL
class A ( BaseImageProcessor ):
    """CLIP-style image processor.

    Pipeline (each step optional): convert to RGB, resize the shortest edge,
    center-crop, rescale to [0, 1], channel-wise normalisation, then pack the
    result into a `BatchFeature` under `pixel_values`.

    The degraded source was not even importable: every signature repeated the
    parameter name `lowercase_` (SyntaxError), all five methods shared the
    name `lowerCamelCase` although `preprocess` calls `self.resize` /
    `self.center_crop` / `self.rescale` / `self.normalize`, the base class
    `UpperCamelCase_` was undefined (`BaseImageProcessor` is what this file
    imports), and the `self.` attribute assignments had been turned into dead
    locals. Canonical names are restored throughout.
    """

    # NOTE(review): upstream this attribute is `model_input_names`; restored
    # so the base class can report the model's expected inputs.
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default: shortest edge resized to 224, aspect ratio preserved.
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        # Default: 224x224 center crop.
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so that its shortest edge equals `size['shortest_edge']`."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `(size['height'], size['width'])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by `scale` (e.g. 1/255 to map into [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalise `image` channel-wise with the given `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch; per-call arguments override the
        defaults stored on the instance. Returns a `BatchFeature` with
        `pixel_values` in `data_format` (channels-first by default)."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 464 | 1 |
import argparse
_lowerCAmelCase : Tuple = "docs/source/_static/js/custom.js"
def __UpperCamelCase ( _A : str ) -> None:
    """Update `docs/source/_static/js/custom.js` for release version `_A`.

    Rewrites the `const stableVersion = ...` line and appends the new version
    to the `versionMapping` dictionary. Fixes in the degraded source: the free
    name `version` raised NameError, and the new stable-version line was built
    but never stored back into `lines`.
    """
    version = _A
    # `_lowerCAmelCase` is the module-level path constant for custom.js.
    with open(_lowerCAmelCase , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = F"const stableVersion = \"v{version}\"\n"
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end (before the closing brace).
    lines[index - 1] += F" \"v{version}\": \"v{version}\",\n"
    with open(_lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
    # The updater above is named `__UpperCamelCase` in this module; the old
    # call to `update_custom_js` raised NameError.
    __UpperCamelCase(args.version)
| 705 |
'''simple docstring'''
from typing import Any
class Node:
    """A single element of the singly linked list."""

    def __init__(self, data: Any) -> None:
        self.data = data  # payload stored in this node
        self.next = None  # reference to the following node (None = tail)

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    """Singly linked list supporting indexing, insertion, deletion, reversal.

    The degraded source named both classes `lowerCAmelCase` (so `Node` was
    never defined although `insert_nth` and the tests reference it) and
    turned the link/data mutations in `__setitem__`, `insert_nth`,
    `delete_nth` and `reverse` into dead local assignments. Canonical names
    and mutations are restored.
    """

    def __init__(self) -> None:
        self.head = None  # first node; None when the list is empty

    def __iter__(self):
        # Yields the *data* of each node, head to tail.
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, item in enumerate(self):
            if i == index:
                return item
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Put the old tail at the head.
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise the LinkedList API with integer payloads.

    The degraded source named all three test/driver functions
    `__UpperCamelCase` (so only the last binding survived and nothing was
    collected by a test runner) and referenced the undefined name `_A`
    instead of the local list; both are fixed here.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise the LinkedList API with mixed payload types (ints, floats,
    strings, Node instances and None); fixes the undefined `_A` references
    and the non-unique function name of the degraded source."""
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive command-line demo of the LinkedList operations.

    Renamed from the degraded `__UpperCamelCase` so the `__main__` guard's
    `main()` call resolves; internal `_A` references are replaced by the
    local `linked_list`.
    """
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 646 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the configuration used by the LayoutLMv3 image-processor tests.

    Fixes in the degraded source: every `__init__` parameter was named
    `snake_case__` (SyntaxError), the attribute assignments were dead locals
    instead of `self.x = ...`, the dict builder was name-mangled as `__a`
    although its call site uses `prepare_image_processor_dict`, and the class
    name collided with the test class below while the call site expects
    `LayoutLMvaImageProcessingTester`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the image processor under test."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowercase_ (ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `LayoutLMvaImageProcessor` (PIL / numpy / torch inputs plus a
    Tesseract OCR integration check).

    Fixes in the degraded source: the base `_lowercase` was undefined
    (`ImageProcessingSavingTestMixin` is the imported mixin), `setUp`, the
    `image_processor_dict` property and every `test_*` method had been renamed
    to `__a` (so neither unittest nor the mixin could find them), the class
    attribute the mixin reads is `image_processing_class`, and locals were
    degraded to `SCREAMING_SNAKE_CASE_`.
    """

    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'apply_ocr'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        # Keyword overrides passed to `from_dict` take precedence.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors='pt')
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # Run OCR on a real document scan and compare against reference output.
        image_processing = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        ds = load_dataset('hf-internal-testing/fixtures_docvqa', split='test')
        image = Image.open(ds[0]['file']).convert('RGB')
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)
        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors='pt')
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 2_24, 2_24))
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
_snake_case = 'pt'
elif is_tf_available():
_snake_case = 'tf'
else:
_snake_case = 'jax'
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ = PerceiverTokenizer
UpperCAmelCase__ = False
def __lowercase( self ) -> Tuple:
super().setUp()
__UpperCamelCase = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowercase( self ) -> Union[str, Any]:
return PerceiverTokenizer.from_pretrained('deepmind/language-perceiver' )
def __lowercase( self , **_SCREAMING_SNAKE_CASE ) -> PerceiverTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowercase( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=20 , _SCREAMING_SNAKE_CASE=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__UpperCamelCase = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
try:
__UpperCamelCase = tokenizer.decode([i] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : re.match(r'^[ a-zA-Z]+$' , t[1] ) , _SCREAMING_SNAKE_CASE ) )
__UpperCamelCase = list(filter(lambda _SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) )
if max_length is not None and len(_SCREAMING_SNAKE_CASE ) > max_length:
__UpperCamelCase = toks[:max_length]
if min_length is not None and len(_SCREAMING_SNAKE_CASE ) < min_length and len(_SCREAMING_SNAKE_CASE ) > 0:
while len(_SCREAMING_SNAKE_CASE ) < min_length:
__UpperCamelCase = toks + toks
# toks_str = [t[1] for t in toks]
__UpperCamelCase = [t[0] for t in toks]
# Ensure consistency
__UpperCamelCase = tokenizer.decode(_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
if " " not in output_txt and len(_SCREAMING_SNAKE_CASE ) > 1:
__UpperCamelCase = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE )
)
if with_prefix_space:
__UpperCamelCase = ' ' + output_txt
__UpperCamelCase = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
return output_txt, output_ids
def test_multibytes_char(self):
    """Check that multi-byte UTF-8 characters encode/decode correctly."""
    tokenizer = self.perceiver_tokenizer
    src_text = 'Unicode €.'
    encoded = tokenizer(src_text)
    encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
    self.assertEqual(encoded['input_ids'], encoded_ids)
    # decoding
    decoded = tokenizer.decode(encoded_ids)
    self.assertEqual(decoded, '[CLS]Unicode €.[SEP]')
    encoded = tokenizer('e è é ê ë')
    encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
    self.assertEqual(encoded['input_ids'], encoded_ids)
    # decoding
    decoded = tokenizer.decode(encoded_ids)
    self.assertEqual(decoded, '[CLS]e è é ê ë[SEP]')
    # encode/decode, but with `encode` instead of `__call__`
    self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), '[CLS]e è é ê ë[SEP]')
def test_prepare_batch_integration(self):
    """Batch-encode two sentences with padding and verify ids, shape and mask."""
    tokenizer = self.perceiver_tokenizer
    src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
    # fmt: off
    expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
    # fmt: on
    batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
    # NOTE(review): `BatchEncoding` assumed imported at file top — confirm.
    self.assertIsInstance(batch, BatchEncoding)
    if FRAMEWORK != "jax":
        result = list(batch.input_ids.numpy()[0])
    else:
        result = list(batch.input_ids.tolist()[0])
    self.assertListEqual(expected_src_tokens, result)
    self.assertEqual((2, 38), batch.input_ids.shape)
    self.assertEqual((2, 38), batch.attention_mask.shape)
def test_empty_target_text(self):
    """Encoding source-only input must not produce decoder fields."""
    tokenizer = self.perceiver_tokenizer
    src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
    batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
    # check if input_ids are returned and no decoder_input_ids
    self.assertIn('input_ids', batch)
    self.assertIn('attention_mask', batch)
    self.assertNotIn('decoder_input_ids', batch)
    self.assertNotIn('decoder_attention_mask', batch)
def test_max_length_integration(self):
    """Targets padded/truncated to max_length must come out exactly that long."""
    tokenizer = self.perceiver_tokenizer
    tgt_text = [
        'Summary of the text.',
        'Another summary.',
    ]
    targets = tokenizer(
        text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK
    )
    self.assertEqual(32, targets['input_ids'].shape[1])
def test_save_and_load_tokenizer(self):
    """save_pretrained / from_pretrained must round-trip encodings, added tokens
    and model_max_length."""
    # safety check on max_len default value so we are sure the test works
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"""{tokenizer.__class__.__name__}"""):
            self.assertNotEqual(tokenizer.model_max_length, 42)
    # Now let's start the test
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f"""{tokenizer.__class__.__name__}"""):
            # Isolate this from the other tests because we save additional tokens/etc
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwant\u00E9d,running'
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            self.assertListEqual(before_tokens, after_tokens)
            shutil.rmtree(tmpdirname)
    tokenizers = self.get_tokenizers(model_max_length=42)
    for tokenizer in tokenizers:
        with self.subTest(f"""{tokenizer.__class__.__name__}"""):
            # Isolate this from the other tests because we save additional tokens/etc
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwant\u00E9d,running'
            tokenizer.add_tokens(['bim', 'bambam'])
            additional_special_tokens = tokenizer.additional_special_tokens
            additional_special_tokens.append('new_additional_special_token')
            tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            self.assertListEqual(before_tokens, after_tokens)
            self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
            self.assertEqual(after_tokenizer.model_max_length, 42)
            # model_max_length can still be overridden at load time
            tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
            self.assertEqual(tokenizer.model_max_length, 43)
            shutil.rmtree(tmpdirname)
def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
    """Additional special tokens from config files must be honored and must be
    overridable via from_pretrained(additional_special_tokens=...)."""
    tokenizer_list = []
    if self.test_slow_tokenizer:
        tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
    if self.test_rust_tokenizer:
        tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
    for tokenizer_class, tokenizer_utils in tokenizer_list:
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer_utils.save_pretrained(tmp_dir)
            with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                special_tokens_map = json.load(json_file)
            with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                tokenizer_config = json.load(json_file)
            added_tokens_extra_ids = [f"""<extra_id_{i}>""" for i in range(125)]
            special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                'an_additional_special_token'
            ]
            tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                'an_additional_special_token'
            ]
            with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                json.dump(special_tokens_map, outfile)
            with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                json.dump(tokenizer_config, outfile)
            # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
            # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
            # "special_tokens_map.json" files
            tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                tmp_dir,
            )
            self.assertIn(
                'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
            self.assertEqual(
                ['an_additional_special_token'],
                tokenizer_without_change_in_init.convert_ids_to_tokens(
                    tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])),
            )
            # Now we test that we can change the value of additional_special_tokens in the from_pretrained
            new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
            tokenizer = tokenizer_class.from_pretrained(
                tmp_dir, additional_special_tokens=new_added_tokens,
            )
            self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
            self.assertEqual(
                ['a_new_additional_special_token'],
                tokenizer.convert_ids_to_tokens(
                    tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])),
            )
def test_decode_invalid_byte_id(self):
    """Decoding a lone continuation byte (178) must yield the replacement char."""
    tokenizer = self.perceiver_tokenizer
    self.assertEqual(tokenizer.decode([178]), '�')
# The following common tests do not apply to Perceiver, so they are overridden
# with no-ops. NOTE(review): method names reconstructed from the upstream
# Perceiver tokenizer test file — confirm against the original.

# tokenizer can be instantiated without any pretrained files, so no pretrained list
def test_pretrained_model_lists(self):
    pass

# tokenizer does not have a vocabulary file
def test_get_vocab(self):
    pass

# inputs cannot be pretokenized (the tokenizer operates on raw bytes)
def test_pretokenized_inputs(self):
    pass

# tests all ids in vocab => vocab doesn't exist, so unnecessary to test
def test_conversion_reversible(self):
    pass
def test_convert_tokens_to_string_format(self):
    """convert_tokens_to_string must return a str for per-character tokens."""
    # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
    # strings and special added tokens as tokens
    tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f"""{tokenizer.__class__.__name__}"""):
            tokens = ['[CLS]', 't', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 's', 't', '[SEP]']
            string = tokenizer.convert_tokens_to_string(tokens)
            self.assertIsInstance(string, str)
| 383 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger for this configuration module.
logger = logging.get_logger(__name__)

# Map from model identifier to the URL of its configuration file on the Hub.
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}
class XLMRobertaConfig(PretrainedConfig):
    """Configuration class for XLM-RoBERTa models.

    Stores the hyper-parameters of the model (vocabulary size, hidden size,
    number of layers/heads, dropout rates, etc.). Defaults mirror RoBERTa-base.
    """

    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special token ids are forwarded to the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for XLM-RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence; all other tasks use (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 706 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of ``function`` (a sympy expression string in ``variable``)
    using the Newton-Raphson method.

    :param function: expression to solve, e.g. ``"sin(x)"``.
    :param starting_point: initial guess (may be complex).
    :param variable: name of the symbol used in ``function``.
    :param precision: stop when consecutive guesses differ by less than this.
    :param multiplicity: root multiplicity; accelerates convergence for
        repeated roots.
    :raises ZeroDivisionError: if the derivative vanishes at a guess.
    """
    symbol = symbols(variable)
    func = lambdify(symbol, function)
    diff_function = lambdify(symbol, diff(function, symbol))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError('Could not find root') from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f'The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}')
    # Find root of polynomial
    # Find fourth Root of 5
    print(f'The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5J)}')
    # Find value of e
    print(
        '''The root of log(y) - 1 = 0 is ''',
        f'{newton_raphson("log(y) - 1", 2, variable="y")}',
    )
    # Exponential Roots
    print(
        '''The root of exp(x) - 1 = 0 is''',
        f'{newton_raphson("exp(x) - 1", 10, precision=0.005)}',
    )
    # Find root of cos(x)
    print(f'The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}')
| 417 | 0 |
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    """Builds small RegNet configs/inputs and runs shape checks for the tests."""

    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        # `parent` is the unittest.TestCase that owns the assertions.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        # One stage per entry in hidden_sizes.
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for RegNet. Overrides common tests that assume input_ids /
    inputs_embeds / attention, which RegNet does not use."""

    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): flag names reconstructed from the upstream RegNet test file
    # (four boolean flags in the original) — confirm against upstream.
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        # RegNetConfig has no common text properties to check.
        return

    @unittest.skip(reason='RegNet does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='RegNet does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                # Norm layers must start at weight=1, bias=0.
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            # stem output + one per stage
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the fixture COCO image used by the integration test."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against a pretrained RegNet checkpoint."""

    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 93 |
import torch
from diffusers import StableDiffusionPipeline

# Path to the DreamBooth-finetuned Stable Diffusion model directory.
model_id = "path-to-your-trained-model"
# fp16 inference on GPU; `torch.floataa` in the original was a mangled
# `torch.float16` (no such attribute exists on torch).
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 307 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Largest per-device batch size; larger requested sizes fall back to
# gradient accumulation (see training_function).
MAX_GPU_BATCH_SIZE = 16
# Fixed batch size used for the evaluation dataloader.
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build train/eval DataLoaders for GLUE MRPC tokenized with bert-base-cased.

    :param accelerator: the `Accelerator` coordinating (distributed) setup.
    :param batch_size: per-device training batch size.
    :return: (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-cased""")
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding="""longest""", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="""pt""",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only: swap in lightweight mocked dataloaders when running the CI.
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train/evaluate BERT on MRPC with optional experiment tracking.

    :param config: dict with "lr", "num_epochs", "seed", "batch_size".
    :param args: parsed CLI args (cpu, mixed_precision, with_tracking, project_dir).
    """
    # For testing only
    if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator
    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="""all""", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("""glue""", """mrpc""")
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,)
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(""".""")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=predictions, references=references,)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    """accuracy""": eval_metric["""accuracy"""],
                    """f1""": eval_metric["""f1"""],
                    """train_loss""": total_loss.item() / len(train_dataloader),
                    """epoch""": epoch,
                }, step=epoch,)
    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    """Parse CLI flags and launch training with default hyper-parameters."""
    parser = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""", type=str, default=None, choices=["""no""", """fp16""", """bf16""", """fp8"""], help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""",)
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    parser.add_argument(
        """--with_tracking""", action="""store_true""", help="""Whether to load in all available experiment trackers from the environment and use them for logging.""",)
    parser.add_argument(
        """--project_dir""", type=str, default="""logs""", help="""Location on where to store experiment tracking logs` and relevent project information""",)
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 684 |
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase : Union[str, Any] = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase : List[Any] = 'main'
# Default branch name
lowerCamelCase : Tuple = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase : List[Any] = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase : List[Any] = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase : int = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
print("""Welcome!""" )
yield
print("""Bye!""" )
@contextlib.contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
print("""Bonjour!""" )
yield
print("""Au revoir!""" )
class __lowercase (unittest.TestCase ):
    """Sanity-check that the installed ``transformers`` package is importable."""

    def UpperCAmelCase ( self ) -> int:
        # Without a module spec, importlib could not load `transformers` dynamically.
        module_spec = transformers.__spec__
        assert module_spec is not None
        located_spec = importlib.util.find_spec("transformers")
        assert located_spec is not None
class __lowercase (unittest.TestCase ):
    """Tests for the ``ContextManagers`` wrapper and the ``find_labels`` helper.

    NOTE(review): identifier obfuscation broke several names in this class —
    the mocked-stdout parameter is declared as ``A`` but the bodies read
    ``mock_stdout``; ``find_labels`` is repeatedly called on the undefined
    name ``A`` (originally concrete model classes); the nested classes inherit
    from the undefined ``UpperCamelCase__``; and ``context_en``/``context_fr``
    are the context managers defined above under mangled names. Restore the
    original identifiers before running.
    """
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def UpperCAmelCase ( self , A ) -> Optional[Any]:
        # An empty manager list should be a transparent no-op wrapper.
        with ContextManagers([] ):
            print("""Transformers are awesome!""" )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" )
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def UpperCAmelCase ( self , A ) -> int:
        # A single manager wraps the body with its enter/exit output.
        with ContextManagers([context_en()] ):
            print("""Transformers are awesome!""" )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" )
    @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO )
    def UpperCAmelCase ( self , A ) -> int:
        # Managers are entered in list order and exited in reverse order.
        with ContextManagers([context_fr(), context_en()] ):
            print("""Transformers are awesome!""" )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" )
    @require_torch
    def UpperCAmelCase ( self ) -> Optional[Any]:
        # PyTorch models: label argument names are derived from the forward signature.
        self.assertEqual(find_labels(A ) , ["""labels"""] )
        self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
        class __lowercase (UpperCamelCase__ ):
            """Subclass used to check that label discovery follows inheritance."""
            pass
        self.assertEqual(find_labels(A ) , ["""labels"""] )
    @require_tf
    def UpperCAmelCase ( self ) -> str:
        # TensorFlow models mirror the PyTorch label conventions.
        self.assertEqual(find_labels(A ) , ["""labels"""] )
        self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] )
        self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] )
        class __lowercase (UpperCamelCase__ ):
            """Subclass used to check that label discovery follows inheritance."""
            pass
        self.assertEqual(find_labels(A ) , ["""labels"""] )
    @require_flax
    def UpperCAmelCase ( self ) -> Any:
        # Flax models don't have labels
        self.assertEqual(find_labels(A ) , [] )
        self.assertEqual(find_labels(A ) , [] )
        self.assertEqual(find_labels(A ) , [] )
        class __lowercase (UpperCamelCase__ ):
            """Subclass used to check that label discovery follows inheritance."""
            pass
        self.assertEqual(find_labels(A ) , [] )
| 684 | 1 |
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCAmelCase_ ( lowerCamelCase ):
    """Print the first-order entropy, the second-order entropy and their
    difference for the text ``lowerCamelCase`` (each rounded to an integer and
    printed with one decimal place).

    Relies on the sibling ``analyze_text`` helper for the character counts.

    NOTE(review): restored from the un-obfuscated original (TheAlgorithms
    ``shannon_entropy``): the tuple unpack now binds two distinct names,
    ``math.loga`` is the mangled ``math.log2``, and the inner pair loop no
    longer shadows the outer loop variable (which made ``sequence`` always a
    doubled character).
    """
    single_char_strings, two_char_strings = analyze_text(lowerCamelCase)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.
    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")
    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")
def lowerCAmelCase_ ( lowerCamelCase ):
    """Count character occurrences in the (non-empty) text ``lowerCamelCase``.

    Returns a pair of ``collections.Counter`` objects:
      * single-character counts over the whole text, and
      * overlapping two-character sequence counts, with an artificial
        leading-space pair ``" " + text[0]`` so the first character also
        participates in a pair.

    NOTE(review): the obfuscated original assigned both counters to the same
    throwaway name and indexed the undefined name ``text``; restored here so
    the function actually runs. Assumes a non-empty input string.
    """
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    # The loop below stops one short of the end, so count the last character here.
    single_char_strings[lowerCamelCase[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + lowerCamelCase[0]] += 1
    for i in range(0 , len(lowerCamelCase ) - 1 ):
        single_char_strings[lowerCamelCase[i]] += 1
        two_char_strings[lowerCamelCase[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def lowerCAmelCase_ ( ):
    """Run the doctests embedded in this module.

    Kept from the original for parity; a sample driver text for the entropy
    calculator was left commented out upstream and is omitted here.
    """
    from doctest import testmod

    testmod()
if __name__ == "__main__":
    # Bug fix: the obfuscated source called the undefined name ``main()``;
    # the module entry point is the doctest runner defined above.
    lowerCAmelCase_()
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( snake_case_ ):
    """Processor pairing a Blip image processor with an auto tokenizer.

    NOTE(review): identifier obfuscation damaged this class — the three class
    attributes are all bound to the same name ``lowercase`` (only the last
    survives), ``__init__`` repeats the parameter name ``snake_case`` (a
    SyntaxError, as does ``__call__``), and the ``UpperCamelCase_ :`` lines
    were originally attribute writes (e.g. ``self.current_processor``).
    Restore the original names before use.
    """
    lowercase = ['image_processor', 'tokenizer']
    lowercase = 'BlipImageProcessor'
    lowercase = 'AutoTokenizer'
    def __init__( self : Optional[int] , snake_case : Tuple , snake_case : Union[str, Any] ) -> Optional[int]:
        """Store the image-processor/tokenizer pair on the processor mixin."""
        # presumably ``tokenizer.return_token_type_ids = False`` originally — TODO confirm
        UpperCamelCase_ : Tuple = False
        super().__init__(snake_case , snake_case )
        # presumably ``self.current_processor = self.image_processor`` originally
        UpperCamelCase_ : Optional[Any] = self.image_processor
    def __call__( self : str , snake_case : ImageInput = None , snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case : bool = True , snake_case : Union[bool, str, PaddingStrategy] = False , snake_case : Union[bool, str, TruncationStrategy] = None , snake_case : Optional[int] = None , snake_case : int = 0 , snake_case : Optional[int] = None , snake_case : Optional[bool] = None , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = True , snake_case : Optional[Union[str, TensorType]] = None , **snake_case : str , ) -> BatchEncoding:
        """Tokenize ``text`` and/or preprocess ``images`` into a ``BatchEncoding``."""
        if images is None and text is None:
            raise ValueError('You have to specify either images or text.' )
        # Get only text
        if images is None:
            UpperCamelCase_ : Union[str, Any] = self.tokenizer
            UpperCamelCase_ : List[Any] = self.tokenizer(
                text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
            return text_encoding
        # add pixel_values
        UpperCamelCase_ : Union[str, Any] = self.image_processor(snake_case , return_tensors=snake_case )
        if text is not None:
            UpperCamelCase_ : Optional[Any] = self.tokenizer(
                text=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_token_type_ids=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
        else:
            UpperCamelCase_ : Any = None
        if text_encoding is not None:
            encoding_image_processor.update(snake_case )
        return encoding_image_processor
    def SCREAMING_SNAKE_CASE__ ( self : int , *snake_case : Union[str, Any] , **snake_case : Any ) -> str:
        """Forward all arguments to ``tokenizer.batch_decode``."""
        return self.tokenizer.batch_decode(*snake_case , **snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : Dict , *snake_case : str , **snake_case : Tuple ) -> List[str]:
        """Forward all arguments to ``tokenizer.decode``."""
        return self.tokenizer.decode(*snake_case , **snake_case )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[Any]:
        """Union of tokenizer and image-processor input names, deduplicated in order."""
        UpperCamelCase_ : Any = self.tokenizer.model_input_names
        UpperCamelCase_ : Optional[Any] = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 417 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def _snake_case (_snake_case : List[str] , _snake_case : int) -> Any:
_lowercase =nn.functional.normalize(_snake_case)
_lowercase =nn.functional.normalize(_snake_case)
return torch.mm(_snake_case , normalized_text_embeds.t())
class SCREAMING_SNAKE_CASE_ ( _a ):
    """Stable-Diffusion-style NSFW safety checker: scores CLIP image embeddings
    against learned "concept" and "special care" embeddings.

    NOTE(review): obfuscation damage — the two class attributes share the name
    ``__lowerCAmelCase``; the ``UpperCamelCase__`` methods repeat the
    parameter name ``snake_case`` (a SyntaxError); and every ``_lowercase =``
    assignment below originally bound a distinct name (e.g.
    ``self.vision_model``, ``result_img["special_scores"][concept_idx]``), so
    later reads of those names are unbound as written. Restore before use.
    """
    __lowerCAmelCase : Tuple =CLIPConfig
    __lowerCAmelCase : Dict =['''CLIPEncoderLayer''']
    def __init__( self :Optional[Any], snake_case :CLIPConfig):
        """Build the CLIP vision tower, projection layer and concept-embedding parameters."""
        super().__init__(snake_case)
        # presumably self.vision_model / self.visual_projection / self.concept_embeds /
        # self.special_care_embeds and their weight vectors — TODO confirm against upstream
        _lowercase =CLIPVisionModel(config.vision_config)
        _lowercase =nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=snake_case)
        _lowercase =nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=snake_case)
        _lowercase =nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=snake_case)
        _lowercase =nn.Parameter(torch.ones(17), requires_grad=snake_case)
        _lowercase =nn.Parameter(torch.ones(3), requires_grad=snake_case)
    @torch.no_grad()
    def UpperCamelCase__ ( self :int, snake_case :str, snake_case :Optional[Any]):
        """Per-image (numpy) scoring path; returns ``(images, has_nsfw_concepts)``."""
        _lowercase =self.vision_model(snake_case)[1] # pooled_output
        _lowercase =self.visual_projection(snake_case)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        _lowercase =cosine_distance(snake_case, self.special_care_embeds).cpu().float().numpy()
        _lowercase =cosine_distance(snake_case, self.concept_embeds).cpu().float().numpy()
        _lowercase =[]
        _lowercase =image_embeds.shape[0]
        for i in range(snake_case):
            _lowercase ={'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}
            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            _lowercase =0.0
            for concept_idx in range(len(special_cos_dist[0])):
                _lowercase =special_cos_dist[i][concept_idx]
                _lowercase =self.special_care_embeds_weights[concept_idx].item()
                _lowercase =round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]})
                    _lowercase =0.0_1
            for concept_idx in range(len(cos_dist[0])):
                _lowercase =cos_dist[i][concept_idx]
                _lowercase =self.concept_embeds_weights[concept_idx].item()
                _lowercase =round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(snake_case)
            result.append(snake_case)
        _lowercase =[len(res['bad_concepts']) > 0 for res in result]
        return images, has_nsfw_concepts
    @torch.no_grad()
    def UpperCamelCase__ ( self :Any, snake_case :torch.FloatTensor, snake_case :torch.FloatTensor):
        """Vectorised (torch) scoring path; returns ``(images, has_nsfw_concepts)``."""
        _lowercase =self.vision_model(snake_case)[1] # pooled_output
        _lowercase =self.visual_projection(snake_case)
        _lowercase =cosine_distance(snake_case, self.special_care_embeds)
        _lowercase =cosine_distance(snake_case, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        _lowercase =0.0
        _lowercase =special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        _lowercase =torch.any(special_scores > 0, dim=1)
        _lowercase =special_care * 0.0_1
        _lowercase =special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])
        _lowercase =(cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        _lowercase =torch.any(concept_scores > 0, dim=1)
        return images, has_nsfw_concepts
| 557 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class SCREAMING_SNAKE_CASE_ ( _a ):
    """Repository-hygiene checks over ``./datasets``: every ``open(...)`` must
    pass an explicit encoding and no dataset script may call ``print``.

    NOTE(review): obfuscation damage — regex/match results below are all
    assigned to ``_lowercase`` while the following lines read the original
    names (``regexp``, ``match``, ``matches``, ``dataset_paths``,
    ``dataset_files``), and the checker methods are invoked under their
    original private names. Restore before running.
    """
    def UpperCamelCase__ ( self :Any, snake_case :str):
        """Return a regex match if the file opens a file without an explicit encoding."""
        with open(snake_case, encoding='utf-8') as input_file:
            # Flags `open(...)` calls whose arguments lack encoding/binary-mode markers.
            _lowercase =re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)')
            _lowercase =input_file.read()
            _lowercase =regexp.search(snake_case)
        return match
    def UpperCamelCase__ ( self :Optional[Any], snake_case :str):
        """Return a match for an uncommented, unquoted ``print(`` call, if any."""
        with open(snake_case, encoding='utf-8') as input_file:
            _lowercase =re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()', re.DOTALL)
            _lowercase =input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            _lowercase =regexp.finditer(snake_case)
            _lowercase =[match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def UpperCamelCase__ ( self :List[str]):
        """Assert every dataset script passes the encoding check."""
        _lowercase =Path('./datasets')
        _lowercase =list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(snake_case)):
                raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''')
    def UpperCamelCase__ ( self :Optional[int]):
        """Assert no dataset script contains a live ``print`` call."""
        _lowercase =Path('./datasets')
        _lowercase =list(dataset_paths.absolute().glob('**/*.py'))
        for dataset in dataset_files:
            if self._no_print_statements(str(snake_case)):
                raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''')
| 557 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _UpperCAmelCase ( snake_case ):
    """Configuration class for the SEW speech model (mirrors ``transformers.SEWConfig``).

    NOTE(review): obfuscation damage — the ``__init__`` signature repeats the
    parameter name ``a`` for every argument (a SyntaxError), and each
    ``lowercase_ : ... = value`` line below was originally a
    ``self.<name> = value`` attribute write; as written the constructor
    discards its inputs and the later reads of ``self.conv_dim`` /
    ``self.conv_stride`` / ``self.conv_kernel`` (and the
    ``num_feat_extract_layers`` property) are unbound. Restore before use.
    """
    __lowerCamelCase: Optional[Any] = '''sew'''
    def __init__( self : Dict , a : Any=3_2 , a : Tuple=7_6_8 , a : Optional[int]=1_2 , a : List[Any]=1_2 , a : Tuple=3_0_7_2 , a : Any=2 , a : Tuple="gelu" , a : List[str]=0.1 , a : Tuple=0.1 , a : List[str]=0.1 , a : Dict=0.0 , a : List[str]=0.1 , a : List[str]=0.1 , a : int=0.02 , a : Any=1e-5 , a : List[str]="group" , a : List[Any]="gelu" , a : List[str]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , a : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a : Optional[Any]=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a : List[str]=False , a : Optional[int]=1_2_8 , a : int=1_6 , a : List[str]=True , a : List[Any]=0.05 , a : List[str]=1_0 , a : Any=2 , a : int=0.0 , a : Union[str, Any]=1_0 , a : Optional[Any]=0 , a : Optional[int]="mean" , a : Union[str, Any]=False , a : Union[str, Any]=False , a : Union[str, Any]=2_5_6 , a : int=0 , a : Optional[int]=1 , a : List[str]=2 , **a : Dict , ):
        """Initialise the configuration; unknown kwargs go to the base ``PretrainedConfig``."""
        super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
        # model dimensions and feature-extractor layout
        lowercase_ : Optional[Any] = hidden_size
        lowercase_ : List[Any] = feat_extract_norm
        lowercase_ : List[str] = feat_extract_activation
        lowercase_ : int = list(SCREAMING_SNAKE_CASE_ )
        lowercase_ : Tuple = list(SCREAMING_SNAKE_CASE_ )
        lowercase_ : List[str] = list(SCREAMING_SNAKE_CASE_ )
        lowercase_ : List[Any] = conv_bias
        lowercase_ : Any = num_conv_pos_embeddings
        lowercase_ : List[str] = num_conv_pos_embedding_groups
        lowercase_ : Union[str, Any] = len(self.conv_dim )
        lowercase_ : Optional[Any] = num_hidden_layers
        lowercase_ : List[str] = intermediate_size
        lowercase_ : List[Any] = squeeze_factor
        lowercase_ : Dict = hidden_act
        lowercase_ : Tuple = num_attention_heads
        # dropout / regularisation settings
        lowercase_ : int = hidden_dropout
        lowercase_ : Tuple = attention_dropout
        lowercase_ : Tuple = activation_dropout
        lowercase_ : List[str] = feat_proj_dropout
        lowercase_ : Tuple = final_dropout
        lowercase_ : Tuple = layerdrop
        lowercase_ : Any = layer_norm_eps
        lowercase_ : Union[str, Any] = initializer_range
        lowercase_ : Optional[int] = vocab_size
        # the three conv layout tuples must describe the same number of layers
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowercase_ : int = apply_spec_augment
        lowercase_ : Any = mask_time_prob
        lowercase_ : int = mask_time_length
        lowercase_ : Any = mask_time_min_masks
        lowercase_ : List[Any] = mask_feature_prob
        lowercase_ : Dict = mask_feature_length
        lowercase_ : Any = mask_feature_min_masks
        # ctc loss
        lowercase_ : List[str] = ctc_loss_reduction
        lowercase_ : Union[str, Any] = ctc_zero_infinity
        # sequence classification
        lowercase_ : int = use_weighted_layer_sum
        lowercase_ : List[str] = classifier_proj_size
    @property
    def lowerCAmelCase__ ( self : Dict ):
        """Total stride of the convolutional feature extractor (product of ``conv_stride``)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
| 620 |
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
__lowercase : List[str] = logging.get_logger(__name__)
class _A ( snake_case ):
    """Configuration for a vision-encoder / text-decoder model pair (mirrors
    ``VisionEncoderDecoderConfig``).

    NOTE(review): obfuscation damage — ``__init__`` reads ``kwargs`` though its
    parameter is named ``SCREAMING_SNAKE_CASE_``; the classmethod repeats that
    parameter name (a SyntaxError); and the ``snake_case : ... =`` lines were
    originally attribute writes (``self.encoder`` / ``self.decoder`` /
    ``self.is_encoder_decoder``), so ``to_dict`` reads unbound attributes as
    written. Restore before use.
    """
    __lowerCamelCase : Tuple = '''vision-encoder-decoder'''
    __lowerCamelCase : List[Any] = True
    def __init__( self ,**SCREAMING_SNAKE_CASE_ ):
        """Require both ``encoder`` and ``decoder`` sub-configs and instantiate them via ``AutoConfig``."""
        super().__init__(**SCREAMING_SNAKE_CASE_ )
        if "encoder" not in kwargs or "decoder" not in kwargs:
            raise ValueError(
                F"""A configuraton of type {self.model_type} cannot be instantiated because """
                F"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" )
        snake_case : Union[str, Any] = kwargs.pop("""encoder""" )
        snake_case : Any = encoder_config.pop("""model_type""" )
        snake_case : Optional[Any] = kwargs.pop("""decoder""" )
        snake_case : Union[str, Any] = decoder_config.pop("""model_type""" )
        snake_case : Any = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
        snake_case : Union[str, Any] = AutoConfig.for_model(SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ )
        snake_case : int = True
    @classmethod
    def snake_case_ ( cls ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,**SCREAMING_SNAKE_CASE_ ):
        """Build a config from separate encoder/decoder configs, forcing the decoder into cross-attention mode."""
        logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" )
        snake_case : Tuple = True
        snake_case : Union[str, Any] = True
        return cls(encoder=encoder_config.to_dict() ,decoder=decoder_config.to_dict() ,**SCREAMING_SNAKE_CASE_ )
    def snake_case_ ( self ):
        """Serialise to a plain dict, expanding the nested encoder/decoder configs."""
        snake_case : Union[str, Any] = copy.deepcopy(self.__dict__ )
        snake_case : Union[str, Any] = self.encoder.to_dict()
        snake_case : Union[str, Any] = self.decoder.to_dict()
        snake_case : Dict = self.__class__.model_type
        return output
class _A ( snake_case ):
    """ONNX export config for the encoder half: pixel-value input, hidden-state output.

    NOTE(review): the three properties below all carry the mangled name
    ``snake_case_`` (originally ``inputs`` / ``atol_for_validation`` /
    ``outputs``), so only the last definition survives on the class as written.
    """
    __lowerCamelCase : Optional[Any] = version.parse('''1.11''' )
    @property
    def snake_case_ ( self ):
        """Input spec: a batched 4-D ``pixel_values`` image tensor."""
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )
    @property
    def snake_case_ ( self ):
        """Absolute tolerance used when validating the exported model."""
        return 1E-4
    @property
    def snake_case_ ( self ):
        """Output spec: ``last_hidden_state`` with batch and encoder-sequence axes dynamic."""
        return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} )
class _A ( snake_case ):
    """ONNX export config for the decoder half of a vision-encoder-decoder model.

    NOTE(review): obfuscation damage — ``snake_case`` assignment targets below
    were originally named locals (``common_inputs``, ``dummy_input``, …), so
    the later reads of those names are unbound as written.
    """
    @property
    def snake_case_ ( self ):
        """Decoder input spec: input_ids / attention_mask / encoder_hidden_states."""
        snake_case : Tuple = OrderedDict()
        snake_case : Optional[int] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        snake_case : Union[str, Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
        snake_case : Optional[Any] = {0: """batch""", 1: """encoder_sequence"""}
        return common_inputs
    def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = -1 ,SCREAMING_SNAKE_CASE_ = False ,SCREAMING_SNAKE_CASE_ = None ,):
        """Build dummy decoder inputs, swapping ``input_ids`` for zeroed encoder hidden states."""
        import torch
        snake_case : Optional[Any] = OrderedDict()
        snake_case : Tuple = super().generate_dummy_inputs(
            SCREAMING_SNAKE_CASE_ ,batch_size=SCREAMING_SNAKE_CASE_ ,seq_length=SCREAMING_SNAKE_CASE_ ,is_pair=SCREAMING_SNAKE_CASE_ ,framework=SCREAMING_SNAKE_CASE_ )
        snake_case , snake_case : List[Any] = dummy_input["""input_ids"""].shape
        snake_case : Optional[int] = (batch, encoder_sequence, self._config.encoder_hidden_size)
        snake_case : List[str] = dummy_input.pop("""input_ids""" )
        snake_case : int = dummy_input.pop("""attention_mask""" )
        snake_case : Dict = torch.zeros(SCREAMING_SNAKE_CASE_ )
        return common_inputs
class _A ( snake_case ):
    """Top-level ONNX config for a vision-encoder-decoder model: dispatches the
    per-part (encoder/decoder) ONNX configurations.

    NOTE(review): the first two methods share the mangled name ``snake_case_``
    (originally ``inputs`` and ``get_encoder_config``), the decoder factory
    reads the unbound name ``encoder_config``, and the referenced
    ``VisionEncoderDecoder*OnnxConfig`` classes exist here only under mangled
    names. Restore before use.
    """
    @property
    def snake_case_ ( self ):
        """No direct inputs — the encoder/decoder sub-configs define them."""
        pass
    def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ):
        """Return the ONNX config for the encoder part."""
        return VisionEncoderDecoderEncoderOnnxConfig(SCREAMING_SNAKE_CASE_ )
    def snake_case_ ( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = "default" ):
        """Return the ONNX config for the decoder part, wiring in the encoder hidden size.

        NOTE(review): ``snake_case : int = encoder_config.hidden_size`` was
        presumably an attribute write on the decoder config — TODO confirm.
        """
        snake_case : int = encoder_config.hidden_size
        return VisionEncoderDecoderDecoderOnnxConfig(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
| 36 | 0 |
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCAmelCase )
class A ( UpperCAmelCase ):
    """Video-classification pipeline: samples frames from a video with decord,
    runs the model's image classifier over them and returns top-k labels.

    NOTE(review): obfuscation damage — several signatures below repeat the
    parameter name ``__a`` (a SyntaxError); their bodies reveal the intended
    names (``top_k`` / ``num_frames`` / ``frame_sampling_rate`` / ``video``),
    and the ``__UpperCAmelCase =`` lines were originally distinct locals. The
    decorator argument and base class are likewise mangled (originally
    ``PIPELINE_INIT_ARGS`` and ``Pipeline``). Restore before use.
    """
    def __init__( self : Any , *__a : List[Any] , **__a : Tuple ) -> Optional[int]:
        super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
        # decord is required for video decoding; fail early if it is missing.
        requires_backends(self , '''decord''' )
        self.check_model_type(UpperCamelCase_ )
    def snake_case__ ( self : Any , __a : List[Any]=None , __a : str=None , __a : List[Any]=None ) -> Optional[Any]:
        # Split caller kwargs into preprocess / (forward) / postprocess parameter dicts.
        __UpperCAmelCase = {}
        if frame_sampling_rate is not None:
            __UpperCAmelCase = frame_sampling_rate
        if num_frames is not None:
            __UpperCAmelCase = num_frames
        __UpperCAmelCase = {}
        if top_k is not None:
            __UpperCAmelCase = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self : Union[str, Any] , __a : Union[str, List[str]] , **__a : str ) -> int:
        """Classify the given video path/URL (or a list of them)."""
        return super().__call__(UpperCamelCase_ , **UpperCamelCase_ )
    def snake_case__ ( self : int , __a : List[str] , __a : int=None , __a : Tuple=1 ) -> Dict:
        # Decode the video, sample `num_frames` frames `frame_sampling_rate` apart,
        # and run them through the image processor.
        if num_frames is None:
            __UpperCAmelCase = self.model.config.num_frames
        if video.startswith('''http://''' ) or video.startswith('''https://''' ):
            __UpperCAmelCase = BytesIO(requests.get(UpperCamelCase_ ).content )
        __UpperCAmelCase = VideoReader(UpperCamelCase_ )
        videoreader.seek(0 )
        __UpperCAmelCase = 0
        __UpperCAmelCase = num_frames * frame_sampling_rate - 1
        __UpperCAmelCase = np.linspace(UpperCamelCase_ , UpperCamelCase_ , num=UpperCamelCase_ , dtype=np.intaa )
        __UpperCAmelCase = videoreader.get_batch(UpperCamelCase_ ).asnumpy()
        __UpperCAmelCase = list(UpperCamelCase_ )
        __UpperCAmelCase = self.image_processor(UpperCamelCase_ , return_tensors=self.framework )
        return model_inputs
    def snake_case__ ( self : Optional[Any] , __a : List[str] ) -> Union[str, Any]:
        # Forward pass through the underlying model.
        __UpperCAmelCase = self.model(**UpperCamelCase_ )
        return model_outputs
    def snake_case__ ( self : List[str] , __a : Any , __a : Optional[int]=5 ) -> Optional[Any]:
        # Convert logits to top-k {"score", "label"} records; only PyTorch is supported.
        if top_k > self.model.config.num_labels:
            __UpperCAmelCase = self.model.config.num_labels
        if self.framework == "pt":
            __UpperCAmelCase = model_outputs.logits.softmax(-1 )[0]
            __UpperCAmelCase = probs.topk(UpperCamelCase_ )
        else:
            raise ValueError(f"""Unsupported framework: {self.framework}""" )
        __UpperCAmelCase = scores.tolist()
        __UpperCAmelCase = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase_ , UpperCamelCase_ )]
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def lowerCAmelCase ( ):
    """Simulate a CUDA out-of-memory failure for the batch-size-finder tests."""
    oom_message = "CUDA out of memory."
    raise RuntimeError(oom_message)
class A ( nn.Module ):
    """Tiny Linear -> BatchNorm -> Linear model used by the memory-utils tests.

    NOTE(review): obfuscation damage — the constructor's ``__UpperCAmelCase =``
    lines were originally attribute writes (two linear layers and a batchnorm;
    ``nn.BatchNormad`` is almost certainly the mangled ``nn.BatchNorm1d``, and
    the two linear layers collided onto the single name ``lineara``), so
    ``forward`` reads attributes that are never assigned as written.
    """
    def __init__( self : Optional[Any] ) -> int:
        super().__init__()
        __UpperCAmelCase = nn.Linear(3 , 4 )
        __UpperCAmelCase = nn.BatchNormad(4 )
        __UpperCAmelCase = nn.Linear(4 , 5 )
    def snake_case__ ( self : List[str] , __a : Optional[int] ) -> Optional[int]:
        # forward: 3 features -> 4 -> batchnorm -> 5
        return self.lineara(self.batchnorm(self.lineara(__a ) ) )
class A ( unittest.TestCase ):
    """Tests for accelerate's ``find_executable_batch_size`` decorator and
    ``release_memory`` helper.

    NOTE(review): obfuscation damage — the inner ``mock_training_loop_function``
    defs receive their batch size as the mangled parameter ``__a`` but their
    bodies read ``batch_size``; ``raise_fake_out_of_memory`` and
    ``ModelForTest`` exist above only under mangled names; and several
    ``__UpperCAmelCase =`` lines were originally distinct locals. Restore
    before running.
    """
    def snake_case__ ( self : Optional[int] ) -> Any:
        # The decorator should halve the batch size on each fake OOM until 8 succeeds.
        __UpperCAmelCase = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(__a : Union[str, Any] ):
            nonlocal batch_sizes
            batch_sizes.append(__a )
            if batch_size != 8:
                raise_fake_out_of_memory()
        mock_training_loop_function()
        self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
    def snake_case__ ( self : str ) -> int:
        # Extra positional arguments must be forwarded through the retries.
        __UpperCAmelCase = []
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(__a : str , __a : Optional[int] ):
            nonlocal batch_sizes
            batch_sizes.append(__a )
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arga
        __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' )
        self.assertListEqual(__a , [1_2_8, 6_4, 3_2, 1_6, 8] )
        self.assertListEqual([bs, arga] , [8, '''hello'''] )
    def snake_case__ ( self : Any ) -> int:
        # Starting at batch size 0 must fail immediately with a clear message.
        @find_executable_batch_size(starting_batch_size=0 )
        def mock_training_loop_function(__a : Optional[int] ):
            pass
        with self.assertRaises(__a ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def snake_case__ ( self : Any ) -> List[Any]:
        # Always-OOM loops must eventually exhaust the batch size and raise.
        @find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(__a : Dict ):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass
        with self.assertRaises(__a ) as cm:
            mock_training_loop_function()
        self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] )
    def snake_case__ ( self : List[Any] ) -> List[str]:
        # Passing the batch size explicitly is a usage error the decorator reports.
        @find_executable_batch_size(starting_batch_size=1_2_8 )
        def mock_training_loop_function(__a : str , __a : Union[str, Any] , __a : int ):
            if batch_size != 8:
                raise raise_fake_out_of_memory()
        with self.assertRaises(__a ) as cm:
            mock_training_loop_function(1_2_8 , '''hello''' , '''world''' )
        self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] )
        self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] )
    def snake_case__ ( self : Tuple ) -> Optional[Any]:
        # Non-OOM exceptions must propagate unchanged.
        @find_executable_batch_size(starting_batch_size=1_6 )
        def mock_training_loop_function(__a : Tuple ):
            raise ValueError('''Oops, we had an error!''' )
        with self.assertRaises(__a ) as cm:
            mock_training_loop_function()
        self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] )
    @require_cuda
    def snake_case__ ( self : Any ) -> List[Any]:
        # release_memory should return CUDA allocation to its pre-model level.
        __UpperCAmelCase = torch.cuda.memory_allocated()
        __UpperCAmelCase = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated() , __a )
        __UpperCAmelCase = release_memory(__a )
        self.assertEqual(torch.cuda.memory_allocated() , __a )
| 654 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Model-tester helper for ViTMSN unit tests: builds configs and dummy
    inputs and checks base-model / classification-head output shapes.

    NOTE(review): obfuscation damage — ``__init__`` repeats the parameter name
    ``a_`` for every argument (a SyntaxError), and each ``__snake_case =``
    line was originally an attribute write (``self.parent = parent`` etc.) or
    a distinct local, so later reads of ``self.batch_size`` /
    ``config_and_inputs`` / ``pixel_values`` are unbound as written. Restore
    before running.
    """
    def __init__( self : List[str] , a_ : str , a_ : Any=13 , a_ : Union[str, Any]=30 , a_ : Union[str, Any]=2 , a_ : Union[str, Any]=3 , a_ : List[str]=True , a_ : Union[str, Any]=True , a_ : Union[str, Any]=32 , a_ : List[str]=5 , a_ : Union[str, Any]=4 , a_ : Union[str, Any]=37 , a_ : Tuple="gelu" , a_ : str=0.1 , a_ : Optional[Any]=0.1 , a_ : Optional[Any]=10 , a_ : Tuple=0.02 , a_ : Union[str, Any]=None , ):
        """Record the test hyper-parameters and derive the patch sequence length."""
        __snake_case = parent
        __snake_case = batch_size
        __snake_case = image_size
        __snake_case = patch_size
        __snake_case = num_channels
        __snake_case = is_training
        __snake_case = use_labels
        __snake_case = hidden_size
        __snake_case = num_hidden_layers
        __snake_case = num_attention_heads
        __snake_case = intermediate_size
        __snake_case = hidden_act
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = type_sequence_label_size
        __snake_case = initializer_range
        __snake_case = scope
        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        __snake_case = (image_size // patch_size) ** 2
        __snake_case = num_patches + 1
    def A ( self : List[str] ):
        """Create a config plus random pixel values (and labels when enabled)."""
        __snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __snake_case = None
        if self.use_labels:
            __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __snake_case = self.get_config()
        return config, pixel_values, labels
    def A ( self : int ):
        """Build a ``ViTMSNConfig`` from the stored hyper-parameters."""
        return ViTMSNConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def A ( self : str , a_ : Tuple , a_ : Optional[int] , a_ : int ):
        """Check the base model's last-hidden-state shape."""
        __snake_case = ViTMSNModel(config=a_ )
        model.to(a_ )
        model.eval()
        __snake_case = model(a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def A ( self : Dict , a_ : Union[str, Any] , a_ : Any , a_ : List[Any] ):
        """Check the classification head's logits shape (RGB and greyscale inputs)."""
        __snake_case = self.type_sequence_label_size
        __snake_case = ViTMSNForImageClassification(a_ )
        model.to(a_ )
        model.eval()
        __snake_case = model(a_ , labels=a_ )
        # NOTE(review): missing f-prefix — the braces below are printed literally.
        print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
        print("Labels: {labels}" )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        __snake_case = 1
        __snake_case = ViTMSNForImageClassification(a_ )
        model.to(a_ )
        model.eval()
        __snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __snake_case = model(a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def A ( self : Tuple ):
        """Return (config, inputs_dict) for the common model-test mixin."""
        __snake_case = self.prepare_config_and_inputs()
        __snake_case , __snake_case , __snake_case = config_and_inputs
        __snake_case = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Aggregate model tests for ViTMSN; overrides common tests because ViTMSN
    consumes pixel_values rather than input_ids/attention_mask."""

    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): these four flags were garbled into one repeated name; the
    # values below follow the conventional ModelTesterMixin switches — confirm upstream.
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Renamed from a garbled placeholder: the integration test below calls
    ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    """Slow end-to-end check against the released facebook/vit-msn-small weights."""

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)
        # NOTE(review): the device target was garbled to an undefined name;
        # `torch_device` (transformers.testing_utils) is the conventional
        # choice — confirm it is imported at the top of the file.
        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 69 |
def A_(num):
    """Return the largest number obtainable by deleting exactly one digit of ``num``.

    The sign is ignored (``abs``). Raises ``TypeError`` for non-integer input.
    Example: 152 -> max(52, 12, 15) == 52.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    # One copy of the digit list per position; each copy drops a different digit.
    num_transpositions = [list(num_str) for _ in range(len(num_str))]
    for index in range(len(num_transpositions)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    __import__("""doctest""").testmod()
| 671 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

# Names restored from their use sites in `pd_read_csv_kwargs` below:
# parameters that pandas.read_csv has no default for, deprecated ones, and
# ones only available from pandas 1.3 / 2.0 onwards.
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV.

    Field names were garbled to a single repeated placeholder; they are
    restored from the keys of ``pd_read_csv_kwargs`` below, whose order
    matches the original default-value sequence.
    """

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10000
    features: Optional[Any] = None  # datasets.Features; kept loose to avoid a hard annotation dependency
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        # `delimiter`/`column_names` are user-facing aliases for pandas'
        # `sep`/`names`; fold them in once at construction time.
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        """Keyword arguments forwarded to ``pandas.read_csv``, pruned to what
        the installed pandas version actually accepts."""
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    """Arrow-based CSV dataset builder; streams pandas chunks into Arrow tables."""

    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
| 226 |
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of ``abs(n)``, iteratively.

    Name restored from the benchmark below, which references
    ``sum_of_digits`` explicitly.
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Recursive digit sum of ``abs(n)``: last digit plus the sum of the rest."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """Digit sum via string conversion — compact but allocates a string."""
    return sum(int(c) for c in str(abs(n)))
def benchmark() -> None:
    """Time the three digit-sum implementations on increasingly large inputs.

    Uses ``timeit`` with the default iteration count, so this is deliberately
    slow; it is only run from the ``__main__`` guard.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest

    # Run doctests first, then the (slow) timing comparison.
    doctest.testmod()
    benchmark()
| 226 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    # Sentencepiece missing: expose a placeholder so `slow_tokenizer_class`
    # below still resolves.
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

# Constant names restored from their references in the tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    """Fast XLNet tokenizer backed by the HuggingFace *tokenizers* library.

    Pads on the left; sequence-pair inputs are laid out as
    ``A <sep> B <sep> <cls>`` with the CLS token at the end.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word: keep the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # The slow vocab can only be re-saved if the sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Append ``<sep>``/``<cls>`` per the XLNet layout: ``A sep [B sep] cls``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Segment ids: 0 for sequence A (+sep), 1 for sequence B (+sep), 2 for CLS."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the sentencepiece model into ``save_directory``; returns the path tuple."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 5 |
def A__(SCREAMING_SNAKE_CASE_: int) -> bool:
    """Return True if the input is an automorphic number.

    An automorphic number's square ends in the number itself (0, 1, 5, 6,
    25, 76, 376, ...). Negative numbers return False; non-integers raise
    ``TypeError``.
    """
    number = SCREAMING_SNAKE_CASE_
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    # Compare the trailing digits of number and its square one at a time.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    import doctest

    # Run this module's doctests (a concatenation artifact that had fused
    # onto this line was removed).
    doctest.testmod()
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Minimal stand-in so module-level references to ``Image`` still
        resolve when Pillow is not installed (the vision tests themselves
        are skipped in that case)."""

        @staticmethod
        def open(*args, **kwargs):
            # No-op: never reached because vision-dependent tests are skipped.
            pass
def hashimage(image) -> str:
    """Return the MD5 hex digest of an image's raw bytes (``image.tobytes()``).

    Fixed: ``hashlib.mda`` does not exist — the intended call is
    ``hashlib.md5``. Renamed to ``hashimage``, the name the tests below use.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    """Pipeline tests for depth estimation.

    Method names were garbled to one repeated identifier (so unittest found
    no tests); restored to the hook names the pipeline test mixin expects.
    """

    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
| 560 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class BlipaProcessorTest(unittest.TestCase):
    """Tests save/load round-trips and input handling for the BLIP-2 processor.

    Method names were garbled to one repeated identifier (so unittest never
    discovered any tests); restored from their bodies (mkdtemp -> setUp,
    rmtree -> tearDown, assertions -> test_*).
    """

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """One random 30x400 RGB PIL image (channels-first array moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(Exception):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipaProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
| 560 | 1 |
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    """Trial-division primality test using the 6k +/- 1 optimization.

    Name restored from the call in ``solution`` below; the garbled body
    referenced an undefined ``_A`` where the argument belongs.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    """Return the ``nth`` prime number (Project Euler problem 7).

    Counts 2 and 3 with unit steps, then scans odd candidates only.
    """
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
    # Print e.g. "solution() = 104743" via f-string debug syntax (a
    # concatenation artifact fused onto this line was removed).
    print(F"{solution() = }")
"""simple docstring"""
import numpy as np
# 5x5 Polybius square used for the cipher below ("j" is merged into "i").
# NOTE(review): the constant name looks machine-garbled; confirm intended name.
__UpperCAmelCase : Any = [
    ['a', 'b', 'c', 'd', 'e'],
    ['f', 'g', 'h', 'i', 'k'],
    ['l', 'm', 'n', 'o', 'p'],
    ['q', 'r', 's', 't', 'u'],
    ['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    """Bifid cipher: letters map to Polybius-square coordinates, the row/column
    streams are fractionated, and the mixed pairs map back to letters.

    The square is built locally so the class is self-contained.
    """

    def __init__(self) -> None:
        SQUARE = [
            ["a", "b", "c", "d", "e"],
            ["f", "g", "h", "i", "k"],
            ["l", "m", "n", "o", "p"],
            ["q", "r", "s", "t", "u"],
            ["v", "w", "x", "y", "z"],
        ]
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str):
        """Return the 1-based (row, column) coordinates of ``letter`` in the square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at 1-based (row, column) coordinates."""
        return self.SQUARE[index1 - 1, index2 - 1]

    def encode(self, message: str) -> str:
        """Encode ``message`` (spaces dropped, 'j' folded into 'i')."""
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # Row coordinates on the first row, column coordinates on the second.
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # Read the grid row by row, then re-pair consecutive numbers.
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            encoded_message = encoded_message + self.numbers_to_letter(index1, index2)

        return encoded_message

    def decode(self, message: str) -> str:
        """Invert :meth:`encode` for a message produced by this cipher."""
        message = message.lower()
        message.replace(" ", "")

        # Flatten each letter's coordinate pair into one long sequence.
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        # Split back into the row/column streams and re-pair vertically.
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            decoded_message = decoded_message + self.numbers_to_letter(index1, index2)

        return decoded_message
| 584 | 0 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def a__ ( _UpperCamelCase : Tuple ,_UpperCamelCase : Dict=0.999 ,_UpperCamelCase : List[Any]="cosine" ,):
if alpha_transform_type == "cosine":
def alpha_bar_fn(_UpperCamelCase : Dict ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(_UpperCamelCase : Union[str, Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
__lowerCamelCase = []
for i in range(__UpperCAmelCase ):
__lowerCamelCase = i / num_diffusion_timesteps
__lowerCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) ,__UpperCAmelCase ) )
return torch.tensor(__UpperCAmelCase ,dtype=torch.floataa )
class __lowerCAmelCase ( _snake_case , _snake_case ):
lowerCAmelCase__ = [e.name for e in KarrasDiffusionSchedulers]
lowerCAmelCase__ = 2
@register_to_config
def __init__( self , __UpperCAmelCase = 1000 , __UpperCAmelCase = 0.00_085 , __UpperCAmelCase = 0.012 , __UpperCAmelCase = "linear" , __UpperCAmelCase = None , __UpperCAmelCase = "epsilon" , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = 1.0 , __UpperCAmelCase = "linspace" , __UpperCAmelCase = 0 , ):
'''simple docstring'''
if trained_betas is not None:
__lowerCamelCase = torch.tensor(__UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __UpperCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(__UpperCAmelCase , alpha_transform_type='''cosine''' )
elif beta_schedule == "exp":
__lowerCamelCase = betas_for_alpha_bar(__UpperCAmelCase , alpha_transform_type='''exp''' )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
__lowerCamelCase = use_karras_sigmas
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase=None ):
'''simple docstring'''
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(__UpperCAmelCase ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCamelCase ( self ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.index_for_timestep(__UpperCAmelCase )
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , ):
'''simple docstring'''
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , __UpperCAmelCase , dtype=__UpperCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , __UpperCAmelCase ) * step_ratio).round()[::-1].copy().astype(__UpperCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(__UpperCAmelCase , 0 , -step_ratio )).round().copy().astype(__UpperCAmelCase )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = np.log(__UpperCAmelCase )
__lowerCamelCase = np.interp(__UpperCAmelCase , np.arange(0 , len(__UpperCAmelCase ) ) , __UpperCAmelCase )
if self.config.use_karras_sigmas:
__lowerCamelCase = self._convert_to_karras(in_sigmas=__UpperCAmelCase , num_inference_steps=self.num_inference_steps )
__lowerCamelCase = np.array([self._sigma_to_t(__UpperCAmelCase , __UpperCAmelCase ) for sigma in sigmas] )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase )
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.from_numpy(__UpperCAmelCase )
__lowerCamelCase = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(__UpperCAmelCase ).startswith('''mps''' ):
# mps does not support float64
__lowerCamelCase = timesteps.to(__UpperCAmelCase , dtype=torch.floataa )
else:
__lowerCamelCase = timesteps.to(device=__UpperCAmelCase )
# empty dt and derivative
__lowerCamelCase = None
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = np.log(__UpperCAmelCase )
# get distribution
__lowerCamelCase = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
__lowerCamelCase = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = log_sigmas[low_idx]
__lowerCamelCase = log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = np.clip(__UpperCAmelCase , 0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.reshape(sigma.shape )
return t
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = in_sigmas[-1].item()
__lowerCamelCase = in_sigmas[0].item()
__lowerCamelCase = 7.0 # 7.0 is the value used in the paper
__lowerCamelCase = np.linspace(0 , 1 , __UpperCAmelCase )
__lowerCamelCase = sigma_min ** (1 / rho)
__lowerCamelCase = sigma_max ** (1 / rho)
__lowerCamelCase = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
def lowerCamelCase ( self ):
'''simple docstring'''
return self.dt is None
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True , ):
'''simple docstring'''
__lowerCamelCase = self.index_for_timestep(__UpperCAmelCase )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_next
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
__lowerCamelCase = model_output
else:
raise ValueError(
F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
if self.config.clip_sample:
__lowerCamelCase = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
# store for 2nd order step
__lowerCamelCase = derivative
__lowerCamelCase = dt
__lowerCamelCase = sample
else:
# 2. 2nd order / Heun's method
__lowerCamelCase = (sample - pred_original_sample) / sigma_next
__lowerCamelCase = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
__lowerCamelCase = self.dt
__lowerCamelCase = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__UpperCAmelCase )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(__UpperCAmelCase ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(__UpperCAmelCase , __UpperCAmelCase ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
    def __len__( self ):
        """Length of the scheduler: the configured number of training timesteps."""
        return self.config.num_train_timesteps
| 713 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Iterable dataset that yields constant-length token sequences.

    Class name restored from its use in `create_dataloader`; base class restored
    from the `IterableDataset` import. Examples are buffered by character count,
    tokenized in batches, joined with the BOS token and sliced into
    `seq_length`-sized tensors (a trailing partial slice is dropped).
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id  # separator between concatenated examples
        self.dataset = dataset
        self.seq_length = seq_length
        # Approximate number of characters needed to fill `num_of_sequences` sequences.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """Build the streaming evaluation dataloader.

    Function name restored from the call site at the bottom of the script; uses
    the module-level `tokenizer` loaded there.
    """
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    """Run evaluation over `eval_dataloader`; return (mean loss, perplexity).

    Function name restored from the call site at the bottom of the script; uses
    the module-level `model`, `eval_dataloader` and `accelerator`.
    """
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 622 | 0 |
'''simple docstring'''
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True if `input_string` matches the regex-like `pattern`.

    Supports `.` (any single char) and `*` (zero or more of the preceding
    element), matched against the whole string via bottom-up DP. Function and
    parameter names restored from the call in the `__main__` block.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]

            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    # Variable names restored: the call below referenced `input_string` and
    # `pattern` while both assignments were obfuscated to one name.
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 467 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
# Module-level logger; name restored from the `logger.info(...)` calls below.
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    """Return True when SageMaker model parallelism is configured and usable.

    Checks, in order: the `SM_HP_MP_PARAMETERS` env var contains a "partitions"
    field, `SM_FRAMEWORK_PARAMS` enables `sagemaker_mpi_enabled`, and the
    `smdistributed` package is importable. Function name restored from the
    call sites below (`if is_sagemaker_model_parallel_available(): ...`).
    """
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
# When SageMaker model parallelism is configured, the smp runtime must be
# initialized once at import time, before any training arguments are built.
if is_sagemaker_model_parallel_available():
    import smdistributed.modelparallel.torch as smp
    smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    """Deprecated SageMaker-specific `TrainingArguments` subclass.

    Class name restored from its own deprecation warning text; base class
    restored from the `TrainingArguments` import. Obfuscated method names
    restored to the `TrainingArguments` API they override.
    """

    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,  # NOTE(review): warning category was obfuscated; upstream uses FutureWarning
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        """Pick the torch device and set `self._n_gpu` for the SageMaker setup."""
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        """Total number of data-parallel processes."""
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        # Model parallelism places the model itself; skip the default placement.
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 467 | 1 |
'''simple docstring'''
from jiwer import compute_measures
import datasets
# Constant names restored from their uses: the class decorator below passes
# _DESCRIPTION/_KWARGS_DESCRIPTION, and `_info` passes citation=_CITATION.
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"

_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"

_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    """Word Error Rate metric backed by jiwer's `compute_measures`."""

    def _info(self):
        """Describe the metric's inputs, citation and reference material."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        """Compute WER either over the concatenated corpus or pair by pair."""
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                # jiwer takes (truth, hypothesis)
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
| 700 |
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Return all contiguous substrings of `sentence` of length `ngram_size`.

    Parameter names restored from the body, which referenced `sentence` and
    `ngram_size` while both parameters were obfuscated to the same identifier
    (a SyntaxError).

    >>> create_ngram("abc", 2)
    ['ab', 'bc']
    """
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 695 | 0 |
from typing import Any
class Node:
    """A singly-linked-list node holding `data` and a `next` pointer.

    Class name restored from its uses (`Node(data)` in `LinkedList.insert_nth`
    and the test functions below); the obfuscation gave both classes in this
    module the same name, so the second shadowed the first.
    """

    def __init__(self, data: Any):
        self.data = data
        self.next = None  # reference to the next node; None when this is the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    """A singly linked list with index-based access and in-place reversal.

    Class and method names restored from the call sites in the test functions
    below (`insert_nth`, `delete_head`, `reverse`, ...); every method had been
    obfuscated to the same name, so all but the last were unreachable.
    """

    def __init__(self):
        self.head = None  # first Node, or None for an empty list

    def __iter__(self) -> Any:
        """Yield the data of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        """Number of nodes in the list."""
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        """Items joined by '->', e.g. '1->2->3'."""
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at `index`; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at `index`; raises ValueError when out of range."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        """Append `data` at the end of the list."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Prepend `data` at the front of the list."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert `data` before position `index` (0..len inclusive)."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        """Remove and return the first item."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        """Remove and return the last item."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the item at `index`; raises IndexError when invalid."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by flipping each node's `next` pointer."""
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations with integer payloads."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with mixed payload types (ints, strings, Nodes, None)."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive demo driver; name restored from the `main()` call in the
    `__main__` guard below."""
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")
# Run the interactive linked-list demo when executed as a script.
if __name__ == "__main__":
    main()
| 154 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b) -> float:
    """Return the Euclidean distance between two equal-length vectors.

    Name restored from the calls inside `similarity_search`; the obfuscated
    original gave both parameters the same name (a SyntaxError).
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset, value_array) -> list[list[list[float] | float]]:
    """For each query vector in `value_array`, find the nearest dataset vector.

    Both arguments are numpy arrays of matching ndim/shape[1]/dtype; returns a
    list of `[nearest_vector, distance]` pairs (one per query) using a linear
    scan with the Euclidean distance.

    Raises:
        ValueError: dimension or column-count mismatch.
        TypeError: shape mismatch on 1-D input, or dtype mismatch.
    """
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        # Start with the first dataset row as the current best match.
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer
def cosine_similarity(input_a, input_b) -> float:
    """Return the cosine similarity of two numpy vectors (dot / product of norms)."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
    # Execute the module's doctests when run directly.
    from doctest import testmod

    testmod()
| 154 | 1 |
"""Radix tree (compressed trie) implementation."""


class RadixNode:
    """A radix-tree node; class name restored from the `RadixNode(...)`
    constructor calls inside `insert`. Children are keyed by the first
    character of their prefix."""
def __init__( self : Optional[int] , UpperCamelCase : str = "" , UpperCamelCase : bool = False ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = {}
# A node will be a leaf if the tree contains its word
__UpperCAmelCase : str = is_leaf
__UpperCAmelCase : Any = prefix
def lowerCamelCase__ ( self : str , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 0
for q, w in zip(self.prefix , A_ ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowerCamelCase__ ( self : Any , UpperCamelCase : list[str] ):
'''simple docstring'''
for word in words:
self.insert(A_ )
def lowerCamelCase__ ( self : Any , UpperCamelCase : str ):
'''simple docstring'''
if self.prefix == word:
__UpperCAmelCase : Dict = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__UpperCAmelCase : Optional[int] = RadixNode(prefix=A_ , is_leaf=A_ )
else:
__UpperCAmelCase : int = self.nodes[word[0]]
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[str] = incoming_node.match(
A_ )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(A_ )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__UpperCAmelCase : Any = remaining_prefix
__UpperCAmelCase : Optional[Any] = self.nodes[matching_string[0]]
__UpperCAmelCase : Any = RadixNode(A_ , A_ )
__UpperCAmelCase : Tuple = aux_node
if remaining_word == "":
__UpperCAmelCase : Dict = True
else:
self.nodes[matching_string[0]].insert(A_ )
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : int = self.nodes.get(word[0] , A_ )
if not incoming_node:
return False
else:
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = incoming_node.match(
A_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(A_ )
def lowerCamelCase__ ( self : int , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.nodes.get(word[0] , A_ )
if not incoming_node:
return False
else:
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Any = incoming_node.match(
A_ )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(A_ )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__UpperCAmelCase : Optional[int] = list(self.nodes.values() )[0]
__UpperCAmelCase : Optional[int] = merging_node.is_leaf
self.prefix += merging_node.prefix
__UpperCAmelCase : Optional[Any] = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__UpperCAmelCase : str = False
# If there is 1 edge, we merge it with its child
else:
__UpperCAmelCase : Union[str, Any] = list(incoming_node.nodes.values() )[0]
__UpperCAmelCase : int = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__UpperCAmelCase : Union[str, Any] = merging_node.nodes
return True
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int = 0 ):
'''simple docstring'''
if self.prefix != "":
print("""-""" * height , self.prefix , """ (leaf)""" if self.is_leaf else """""" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def lowerCamelCase ( ) -> bool:
    """Self-test for the radix tree: insert words, then check find/delete.

    Fix: the obfuscated original referenced the undefined name
    ``_UpperCamelCase`` where the local ``words`` was intended, and carried a
    wrong ``Tuple`` return annotation for a function returning ``True``.
    """
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    # Every inserted word must be found; near-misses must not.
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True
def lowerCamelCase ( ) -> None:
    """Run the radix-tree self-test; raises AssertionError on failure.

    Fix: the original's ``List[str]`` return annotation was wrong — the
    function returns nothing.
    """
    assert test_trie()
def lowerCamelCase ( ) -> None:
    """Demo entry point: build a radix tree from sample words and print it.

    Fix: the obfuscated original referenced the undefined name
    ``_UpperCamelCase`` where the local ``words`` was intended, and carried a
    wrong ``str`` return annotation.
    """
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
# Script entry point.
# NOTE(review): ``main`` is not defined under that name in this file (the
# obfuscation renamed it to ``lowerCamelCase``) — confirm the intended target.
if __name__ == "__main__":
    main()
| 705 |
"""simple docstring"""
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants.
# NOTE(review): the obfuscation collapsed several distinct constant names onto
# the single name ``UpperCAmelCase`` — each assignment below clobbers the
# previous one, so only the last binding survives at runtime.
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
# SentencePiece's word-boundary marker character.
UpperCAmelCase : str = '▁'
# Expected vocabulary filename inside a saved tokenizer directory.
UpperCAmelCase : Optional[int] = {'vocab_file': 'prophetnet.tokenizer'}
# Download URLs for the pretrained vocabulary files.
UpperCAmelCase : Optional[Any] = {
    'vocab_file': {
        'microsoft/xprophetnet-large-wiki100-cased': (
            'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
        ),
    }
}
# Per-checkpoint tokenizer init overrides.
UpperCAmelCase : List[str] = {
    'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
# Maximum model input sizes (positional embedding capacity).
UpperCAmelCase : List[str] = {
    'microsoft/xprophetnet-large-wiki100-cased': 512,
}
def lowerCamelCase ( vocab_file: str ) -> collections.OrderedDict:
    """Load a vocabulary file into an ordered ``token -> index`` mapping.

    Fix: the obfuscated original discarded the ``tokens = reader.readlines()``
    binding and then enumerated the *path string* instead of the lines, and
    lost the ``vocab[token] = index`` write; the wrong ``int`` return
    annotation is corrected as well.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, """r""", encoding="""utf-8""" ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip("""\n""" )
        vocab[token] = index
    return vocab
class lowerCamelCase__ ( A ):
    """A SentencePiece-based tokenizer (XProphetNet-style) built around a
    fairseq-aligned vocabulary.

    NOTE(review): this block appears machine-mangled and is not runnable as
    written — every parameter in each signature shares the name
    ``UpperCamelCase`` (duplicate argument names are a SyntaxError), and many
    assignments that presumably targeted ``self.<attr>`` or named locals now
    bind the throwaway name ``__UpperCAmelCase``.  The original names must be
    restored from upstream before use; comments below describe the apparent
    intent only.
    """

    __a = VOCAB_FILES_NAMES
    __a = PRETRAINED_VOCAB_FILES_MAP
    __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a = ["""input_ids""", """attention_mask"""]
    def __init__( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Union[str, Any]="[SEP]" , UpperCamelCase : int="[SEP]" , UpperCamelCase : int="[UNK]" , UpperCamelCase : Tuple="[PAD]" , UpperCamelCase : Optional[Any]="[CLS]" , UpperCamelCase : Dict="[MASK]" , UpperCamelCase : Optional[Dict[str, Any]] = None , **UpperCamelCase : Tuple , ):
        """Build the tokenizer: load the SentencePiece model and align the
        fairseq special-token ids with the spm vocabulary."""
        __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , )
        # sentencepiece is an optional dependency; fail with guidance if absent.
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
                """ pip install sentencepiece""" )
            raise
        __UpperCAmelCase : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(UpperCamelCase ) )
        __UpperCAmelCase : int = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # put special tokens and [unused] tokens into the vocab
        __UpperCAmelCase : Optional[Any] = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
        # Reserve ids 5..14 for [unused0]..[unused9] placeholder tokens.
        for i in range(10 ):
            __UpperCAmelCase : int = f'''[unused{i}]'''
            __UpperCAmelCase : Dict = 5 + i
        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        __UpperCAmelCase : List[str] = 12
        __UpperCAmelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(UpperCamelCase )
    def __getstate__( self : Any ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        __UpperCAmelCase : Dict = self.__dict__.copy()
        __UpperCAmelCase : Any = None
        return state
    def __setstate__( self : Tuple , UpperCamelCase : Dict ):
        """Restore state and re-load the SentencePiece model after unpickling."""
        __UpperCAmelCase : Dict = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
                """ pip install sentencepiece""" )
            raise
        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            __UpperCAmelCase : Tuple = {}
        __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ):
        """Return a mask marking special tokens (1) vs sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase )
        if token_ids_a is None:
            return ([0] * len(UpperCamelCase )) + [1]
        return ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1]
    def lowerCamelCase__ ( self : int , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
        """Return token-type ids (all zeros for this model family)."""
        __UpperCAmelCase : int = [self.sep_token_id]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0]
        return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    @property
    def lowerCamelCase__ ( self : Optional[int] ):
        """Vocabulary size: SentencePiece pieces plus the fairseq offset."""
        return len(self.sp_model ) + self.fairseq_offset
    def lowerCamelCase__ ( self : int ):
        """Return the full ``token -> id`` vocabulary, including added tokens."""
        __UpperCAmelCase : Dict = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def lowerCamelCase__ ( self : str , UpperCamelCase : str ):
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase )
    def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[str] ):
        """Convert a token to its id, honouring the fairseq special-token map."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __UpperCAmelCase : str = self.sp_model.PieceToId(UpperCamelCase )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[str] ):
        """Convert an id back to its token, honouring the fairseq map."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )
    def lowerCamelCase__ ( self : str , UpperCamelCase : str ):
        """Join pieces and replace the '▁' boundary marker with spaces."""
        __UpperCAmelCase : Any = """""".join(UpperCamelCase ).replace(UpperCamelCase , """ """ ).strip()
        return out_string
    def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ):
        """Save (copy or serialize) the SentencePiece model into a directory."""
        if not os.path.isdir(UpperCamelCase ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __UpperCAmelCase : Optional[int] = os.path.join(
            UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase , """wb""" ) as fi:
                __UpperCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase )
        return (out_vocab_file,)
    def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ):
        """Append [SEP] separators to build a model input from one or two sequences."""
        if token_ids_a is None:
            return token_ids_a + [self.sep_token_id]
        __UpperCAmelCase : Optional[int] = [self.sep_token_id]
        return token_ids_a + sep + token_ids_a + sep
| 299 | 0 |
import warnings
from .generation import TFGenerationMixin
class __lowerCAmelCase ( TFGenerationMixin ):
    """Deprecation shim: importing ``TFGenerationMixin`` from this module still
    works, but emits a ``FutureWarning`` pointing to the new import path.

    Fix: the obfuscated original used the undefined name ``UpperCAmelCase_``
    both as the base class (the ``TFGenerationMixin`` imported above) and as
    the warning category (``FutureWarning``, per the deprecation message).
    """

    # Fires once at import/class-creation time.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
| 9 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
# Configure HF logging verbosity and grab a module-scoped logger.
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( config , has_lm_head=False , is_semantic=False ) -> list:
    """Build the (old_name, new_name) rename table mapping an original DiT/BEiT
    checkpoint's parameter names onto the HF BEiT naming scheme.

    Args:
        config: model config; only ``num_hidden_layers`` is read.
        has_lm_head: include mask-token/layernorm names (masked-image-modeling
            head) instead of pooler/classifier names.
        is_semantic: prefix original names with ``backbone.`` (semantic
            segmentation checkpoints).

    Fix: the obfuscated original declared three parameters with one shared
    name (a SyntaxError) and lost the ``rename_keys = []`` binding that the
    subsequent ``rename_keys.append`` calls rely on; parameter names restored
    from the body's references and the visible call site.
    """
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight") )
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias") )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight") )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias") )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight") )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias") )
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight") )
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias") )
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight") )
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias") )
    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ] )
    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ] )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ] )
    return rename_keys
def A ( state_dict , config , has_lm_head=False , is_semantic=False ) -> None:
    """Split each fused qkv projection in ``state_dict`` into separate BEiT
    query/key/value entries, and rename gamma_1/gamma_2 to lambda_1/lambda_2.

    Mutates ``state_dict`` in place: pops the original fused keys and writes
    the HF-named ones.

    Fix: the obfuscated original declared duplicate parameter names (a
    SyntaxError) and collapsed every ``state_dict[...] = ...`` write into a
    discarded local, making the function a no-op.  The target key names below
    are reconstructed from the BEiT naming scheme used by the rename table in
    this file — confirm against upstream before shipping.
    """
    for i in range(config.num_hidden_layers ):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight" )
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias" )
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias" )
        # Fused weight is stacked [query; key; value] along dim 0.
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_a = state_dict.pop(f"{prefix}blocks.{i}.gamma_1" )
        gamma_b = state_dict.pop(f"{prefix}blocks.{i}.gamma_2" )
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_a
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_b
def A ( dct , old , new ) -> None:
    """Rename key ``old`` to ``new`` in ``dct`` in place.

    Fix: the obfuscated original declared three parameters with one shared
    name (a SyntaxError) and lost the ``dct[new] = val`` write, so the popped
    value was silently dropped.
    """
    val = dct.pop(old )
    dct[new] = val
def A ( ):
    """Download and return the standard COCO test image (two cats on a couch).

    Fix: the obfuscated original referenced the undefined name
    ``__UpperCamelCase`` inside a zero-argument function (the url and the
    ``stream=True`` flag), and its ``Dict`` return annotation was wrong — the
    function returns a PIL image.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def A ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ) -> str:
    """Convert an original DiT checkpoint at a URL into an HF BEiT model,
    verify its output shape on a sample image, save it to disk and optionally
    push it to the Hub.

    NOTE(review): this block is machine-mangled and not runnable as written —
    the three parameters share one name (``checkpoint_url``,
    ``pytorch_dump_folder_path`` and ``push_to_hub`` per the body and the
    argparse call below), and the ``A__ = ...`` assignments discarded the
    named locals (``config``, ``state_dict``, ``model``, ``image_processor``,
    ``encoding``, ``outputs``, ``logits``, ...) that later lines read.
    Restore the original bindings before use.
    """
    # MIM (masked-image-modeling) head for plain DiT; classifier head for rvlcdip.
    A__ = False if 'rvlcdip' in checkpoint_url else True
    A__ = BeitConfig(use_absolute_position_embeddings=__UpperCamelCase , use_mask_token=__UpperCamelCase )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        A__ = 1_024
        A__ = 4_096
        A__ = 24
        A__ = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        A__ = 16
        A__ = 'huggingface/label-files'
        A__ = 'rvlcdip-id2label.json'
        A__ = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='dataset' ) , 'r' ) )
        A__ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
        A__ = idalabel
        A__ = {v: k for k, v in idalabel.items()}
    # load state_dict of original model, remove and rename some keys
    A__ = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='cpu' )['model']
    A__ = create_rename_keys(__UpperCamelCase , has_lm_head=__UpperCamelCase )
    for src, dest in rename_keys:
        rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , has_lm_head=__UpperCamelCase )
    # load HuggingFace model
    A__ = BeitForMaskedImageModeling(__UpperCamelCase ) if has_lm_head else BeitForImageClassification(__UpperCamelCase )
    model.eval()
    model.load_state_dict(__UpperCamelCase )
    # Check outputs on an image
    A__ = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__UpperCamelCase )
    A__ = prepare_img()
    A__ = image_processor(images=__UpperCamelCase , return_tensors='pt' )
    A__ = encoding['pixel_values']
    A__ = model(__UpperCamelCase )
    A__ = outputs.logits
    # verify logits
    A__ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8_192]
    assert logits.shape == torch.Size(__UpperCamelCase ), "Shape of logits not as expected"
    Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(__UpperCamelCase )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(__UpperCamelCase )
    if push_to_hub:
        if has_lm_head:
            A__ = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
        else:
            A__ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
        image_processor.push_to_hub(
            repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__UpperCamelCase , )
        model.push_to_hub(
            repo_path_or_name=Path(__UpperCamelCase , __UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__UpperCamelCase , )
# CLI entry point for the conversion script.
# NOTE(review): obfuscation damage — the parser is bound to the throwaway name
# ``SCREAMING_SNAKE_CASE__`` while later lines read ``parser``/``args``, and
# ``convert_dit_checkpoint`` is the original (pre-obfuscation) name of the
# conversion function defined above as ``A``.
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
    parser.add_argument(
        '''--checkpoint_url''',
        default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
        type=str,
        help='''URL to the original PyTorch checkpoint (.pth file).''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    parser.add_argument(
        '''--push_to_hub''',
        action='''store_true''',
    )
    SCREAMING_SNAKE_CASE__ = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 9 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _lowercase ( unittest.TestCase ):
    """Tests for SamProcessor with the PyTorch backend.

    NOTE(review): machine-mangled — every test method shares the name
    ``lowerCAmelCase__`` (so only the last definition survives on the class),
    and assignments to ``__magic_name__`` discarded the locals/attributes
    (``self.tmpdirname``, ``image_processor``, ``processor``, ``image_inputs``,
    ``masks``, ...) that subsequent lines read.  Restore original names
    (setUp/tearDown, test_*) before running.
    """

    # setUp: create a temp dir and save a fresh SamProcessor into it.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = tempfile.mkdtemp()
        __magic_name__ = SamImageProcessor()
        __magic_name__ = SamProcessor(UpperCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
    # Helper: reload the image processor from the temp dir with overrides.
    def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor
    # tearDown: remove the temp dir.
    def lowerCAmelCase__ ( self ):
        shutil.rmtree(self.tmpdirname )
    # Helper: build a list with one random PIL image.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __magic_name__ = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # save_pretrained / from_pretrained round-trip preserves the image processor.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __magic_name__ = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
        __magic_name__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
    # Processor output matches the bare image processor's output.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = self.get_image_processor()
        __magic_name__ = SamProcessor(image_processor=UpperCamelCase_ )
        __magic_name__ = self.prepare_image_inputs()
        __magic_name__ = image_processor(UpperCamelCase_ , return_tensors='''np''' )
        __magic_name__ = processor(images=UpperCamelCase_ , return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' )  # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # post_process_masks upscales masks to the original image sizes (torch + np).
    @require_torch
    def lowerCAmelCase__ ( self ):
        __magic_name__ = self.get_image_processor()
        __magic_name__ = SamProcessor(image_processor=UpperCamelCase_ )
        __magic_name__ = [torch.ones((1, 3, 5, 5) )]
        __magic_name__ = [[1764, 2646]]
        __magic_name__ = [[683, 1024]]
        __magic_name__ = processor.post_process_masks(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __magic_name__ = processor.post_process_masks(
            UpperCamelCase_ , torch.tensor(UpperCamelCase_ ) , torch.tensor(UpperCamelCase_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __magic_name__ = [np.ones((1, 3, 5, 5) )]
        __magic_name__ = processor.post_process_masks(UpperCamelCase_ , np.array(UpperCamelCase_ ) , np.array(UpperCamelCase_ ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __magic_name__ = [[1, 0], [0, 1]]
        with self.assertRaises(UpperCamelCase_ ):
            __magic_name__ = processor.post_process_masks(UpperCamelCase_ , np.array(UpperCamelCase_ ) , np.array(UpperCamelCase_ ) )
@require_vision
@require_tf
class _lowercase ( unittest.TestCase ):
    """Tests for SamProcessor with the TensorFlow backend.

    NOTE(review): same machine-mangling as the torch test class above — all
    methods share one name and ``__magic_name__`` assignments discarded the
    locals/attributes later lines read.  Restore original names before running.
    """

    # setUp: create a temp dir and save a fresh SamProcessor into it.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = tempfile.mkdtemp()
        __magic_name__ = SamImageProcessor()
        __magic_name__ = SamProcessor(UpperCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
    # Helper: reload the image processor from the temp dir with overrides.
    def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor
    # tearDown: remove the temp dir.
    def lowerCAmelCase__ ( self ):
        shutil.rmtree(self.tmpdirname )
    # Helper: build a list with one random PIL image.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __magic_name__ = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # save_pretrained / from_pretrained round-trip preserves the image processor.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        __magic_name__ = self.get_image_processor(do_normalize=UpperCamelCase_ , padding_value=1.0 )
        __magic_name__ = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCamelCase_ , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
    # Processor output matches the bare image processor's output.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = self.get_image_processor()
        __magic_name__ = SamProcessor(image_processor=UpperCamelCase_ )
        __magic_name__ = self.prepare_image_inputs()
        __magic_name__ = image_processor(UpperCamelCase_ , return_tensors='''np''' )
        __magic_name__ = processor(images=UpperCamelCase_ , return_tensors='''np''' )
        input_feat_extract.pop('''original_sizes''' )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    # post_process_masks upscales masks to the original image sizes (tf + np).
    @require_tf
    def lowerCAmelCase__ ( self ):
        __magic_name__ = self.get_image_processor()
        __magic_name__ = SamProcessor(image_processor=UpperCamelCase_ )
        __magic_name__ = [tf.ones((1, 3, 5, 5) )]
        __magic_name__ = [[1764, 2646]]
        __magic_name__ = [[683, 1024]]
        __magic_name__ = processor.post_process_masks(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __magic_name__ = processor.post_process_masks(
            UpperCamelCase_ , tf.convert_to_tensor(UpperCamelCase_ ) , tf.convert_to_tensor(UpperCamelCase_ ) , return_tensors='''tf''' , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        __magic_name__ = [np.ones((1, 3, 5, 5) )]
        __magic_name__ = processor.post_process_masks(
            UpperCamelCase_ , np.array(UpperCamelCase_ ) , np.array(UpperCamelCase_ ) , return_tensors='''tf''' )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        __magic_name__ = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            __magic_name__ = processor.post_process_masks(
                UpperCamelCase_ , np.array(UpperCamelCase_ ) , np.array(UpperCamelCase_ ) , return_tensors='''tf''' )
@require_vision
@require_torchvision
class _lowercase ( unittest.TestCase ):
    """Cross-framework equivalence tests: SamProcessor must produce the same
    results under PyTorch and TensorFlow.

    NOTE(review): same machine-mangling as the classes above — duplicate
    method names and discarded ``__magic_name__`` bindings; restore original
    names (setUp/tearDown, test_*) before running.
    """

    # setUp: create a temp dir and save a fresh SamProcessor into it.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = tempfile.mkdtemp()
        __magic_name__ = SamImageProcessor()
        __magic_name__ = SamProcessor(UpperCamelCase_ )
        processor.save_pretrained(self.tmpdirname )
    # Helper: reload the image processor from the temp dir with overrides.
    def lowerCAmelCase__ ( self , **UpperCamelCase_ ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ).image_processor
    # tearDown: remove the temp dir.
    def lowerCAmelCase__ ( self ):
        shutil.rmtree(self.tmpdirname )
    # Helper: build a list with one random PIL image.
    def lowerCAmelCase__ ( self ):
        __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        __magic_name__ = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    # post_process_masks gives identical results for tf and pt tensors.
    @is_pt_tf_cross_test
    def lowerCAmelCase__ ( self ):
        __magic_name__ = self.get_image_processor()
        __magic_name__ = SamProcessor(image_processor=UpperCamelCase_ )
        __magic_name__ = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
        __magic_name__ = [tf.convert_to_tensor(UpperCamelCase_ )]
        __magic_name__ = [torch.tensor(UpperCamelCase_ )]
        __magic_name__ = [[1764, 2646]]
        __magic_name__ = [[683, 1024]]
        __magic_name__ = processor.post_process_masks(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , return_tensors='''tf''' )
        __magic_name__ = processor.post_process_masks(
            UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    # Image preprocessing gives identical pixel_values for tf and pt paths.
    @is_pt_tf_cross_test
    def lowerCAmelCase__ ( self ):
        __magic_name__ = self.get_image_processor()
        __magic_name__ = SamProcessor(image_processor=UpperCamelCase_ )
        __magic_name__ = self.prepare_image_inputs()
        __magic_name__ = image_processor(UpperCamelCase_ , return_tensors='''pt''' )['''pixel_values'''].numpy()
        __magic_name__ = processor(images=UpperCamelCase_ , return_tensors='''pt''' )['''pixel_values'''].numpy()
        __magic_name__ = image_processor(UpperCamelCase_ , return_tensors='''tf''' )['''pixel_values'''].numpy()
        __magic_name__ = processor(images=UpperCamelCase_ , return_tensors='''tf''' )['''pixel_values'''].numpy()
        self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ ) )
        self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ ) )
        self.assertTrue(np.allclose(UpperCamelCase_ , UpperCamelCase_ ) )
| 190 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class _lowercase :
def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ):
__magic_name__ = start
__magic_name__ = end
__magic_name__ = val
__magic_name__ = (start + end) // 2
__magic_name__ = left
__magic_name__ = right
def __repr__( self ):
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
    """Segment tree over ``collection`` aggregated with the binary ``function``
    (e.g. ``operator.add``, ``max``, ``min``); supports point updates and
    range queries, plus breadth-first traversal.

    Fix: the obfuscated original gave every method one shared name (only the
    last survived) and discarded the attribute/local bindings that later
    lines read (``self.collection``, ``self.fn``, ``self.root``, ``node.val``,
    ...); names restored from those reads and from the demo's call sites.
    ``_lowercase`` is kept as a backward-compatible alias.
    """

    def __init__(self, collection, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Set element ``i`` to ``val`` and re-aggregate along its path."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Return the aggregate of elements in the inclusive range [i, j]."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        """Recursively build the subtree covering [start, end]."""
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        """Write ``val`` at leaf ``i`` and refresh aggregates on the way up."""
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        """Aggregate the range [i, j] within ``node``'s subtree."""
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Yield nodes in breadth-first order."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)


# Backward-compatible alias for the obfuscated class name.
_lowercase = SegmentTree
# Demo: build the same tree with three aggregation functions and exercise it.
# NOTE(review): obfuscation damage — the tree is bound to the throwaway name
# ``__lowerCamelCase`` while later lines read ``arr``; restore the binding
# before running.
if __name__ == "__main__":
    import operator
    for fn in [operator.add, max, min]:
        print("*" * 50)
        __lowerCamelCase = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()
        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()
        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 190 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _lowerCamelCase ( swinva_name ):
    """Build a ``SwinvaConfig`` whose architecture matches a timm swinv2
    checkpoint name such as ``swinv2_tiny_patch4_window8_256``.

    Fixes: the parameter was declared ``a_`` while the body read
    ``swinva_name`` (NameError); ``hf_hub_download`` was called with the same
    mangled argument twice instead of ``(repo_id, filename)``; the id2label
    comprehension keyed on the wrong variable; the derived values were never
    written onto the config object.
    """
    config = SwinvaConfig()
    name_split = swinva_name.split("_")
    model_size = name_split[1]
    # Image size is the last name component; "to"-style fine-tuned names
    # (e.g. ...window12to16_192to256...) encode it in the final 3 digits.
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    # Window size comes from the "windowN" component.
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        # Checkpoints fine-tuned at a higher resolution keep pretrained window sizes.
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def _lowerCamelCase ( name ):
    """Translate one timm swinv2 state-dict key into the transformers naming
    scheme. Non-head keys are additionally prefixed with ``swinv2.``.

    Fix: the parameter was declared ``a_`` while the body read ``name``
    (``NameError`` at the first use).
    """
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def _lowerCamelCase ( orig_state_dict , model ):
    """Rewrite a timm swinv2 state dict in place for the transformers model:
    drop attention-mask buffers and split fused qkv tensors into separate
    query/key/value entries.

    Fixes: both parameters were declared ``a_`` (a ``SyntaxError``) while the
    body read ``orig_state_dict``/``model``; the sliced q/k/v tensors were
    bound to throwaway locals instead of being written back under their
    transformers key names (keys reconstructed from the reference swinv2
    conversion script — confirm against upstream).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "mask" in key:
            # relative-position masks are recomputed by the HF model
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            # NOTE(review): the mangled source lost this write's target; the
            # reference script stores the value under the renamed key.
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def _lowerCamelCase ( swinva_name , pytorch_dump_folder_path ):
    """Convert a timm swinv2 checkpoint to a transformers Swinv2 model,
    verify logits agree on a COCO sample image, then save and push the model.

    Fixes: both parameters were declared ``a_`` (a ``SyntaxError``) while the
    body read ``swinva_name``/``pytorch_dump_folder_path``; intermediate
    results were bound to throwaway mangled locals instead of the names the
    following lines read.
    """
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits
    # sanity check: the converted model must reproduce the timm logits
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name),
        organization="nandwalritik",
        commit_message="Add model",
    )
if __name__ == "__main__":
    # Fixes: the parser/args were bound to the mangled name `A__` while the
    # following lines read `parser`/`args`; argparse exposes `--swinv2_name`
    # as `args.swinv2_name`, not the `args.swinva_name` the original read.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_swinva_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
| 166 | import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
# Canonical pretrained MaskFormer checkpoint -> hosted config-file URL.
A__ = {
    "facebook/maskformer-swin-base-ade": (
        "https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json"
    )
    # See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}

# fix: the config class below reads `logger`, but the mangled source bound
# the module logger to `A__` (clobbering the archive map above as well).
logger = logging.get_logger(__name__)
class _lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Configuration for MaskFormer: a Swin/ResNet backbone feeding a
    DETR-style transformer decoder plus mask/pixel heads.

    Fixes: ``__init__`` declared every parameter as ``__snake_case`` (a
    ``SyntaxError``); the class attributes were all bound to the same mangled
    name; attribute writes were collapsed to throwaway locals. Names are
    restored from what the body itself reads (``self.backbones_supported``,
    ``self.decoders_supported``, ``self.decoder_config`` …).
    NOTE(review): the two helper methods retain their mangled shared name
    ``snake_case``; at runtime the second definition shadows the first.
    """

    model_type = "maskformer"
    attribute_map = {"hidden_size": "mask_feature_size"}
    backbones_supported = ["resnet", "swin"]
    decoders_supported = ["detr"]

    def __init__(
        self,
        fpn_feature_size: int = 256,
        mask_feature_size: int = 256,
        no_object_weight: float = 0.1,
        use_auxiliary_loss: bool = False,
        backbone_config: Optional[Dict] = None,
        decoder_config: Optional[Dict] = None,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        dice_weight: float = 1.0,
        cross_entropy_weight: float = 1.0,
        mask_weight: float = 20.0,
        output_auxiliary_logits: Optional[bool] = None,
        **kwargs: Any,
    ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384,
                in_channels=3,
                patch_size=4,
                embed_dim=128,
                depths=[2, 2, 18, 2],
                num_heads=[4, 8, 16, 32],
                window_size=12,
                drop_path_rate=0.3,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type") if isinstance(decoder_config, dict) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"Transformer Decoder {decoder_type} not supported, please use one of"
                    f" {','.join(self.decoders_supported)}"
                )
            if isinstance(decoder_config, dict):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config)
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs)

    @classmethod
    def snake_case ( cls, backbone_config, decoder_config, **kwargs ):
        """Alternate constructor from already-built backbone/decoder configs."""
        return cls(backbone_config=backbone_config, decoder_config=decoder_config, **kwargs)

    def snake_case ( self ):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 166 | 1 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# fix: the tokenizer class below reads `logger`, `VOCAB_FILES_NAMES`,
# `PRETRAINED_VOCAB_FILES_MAP` and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`,
# but the mangled source bound all four to `_UpperCAmelCase`.
logger = logging.get_logger(__name__)

# file expected inside a saved tokenizer directory
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

# canonical checkpoints -> hosted SentencePiece model files
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

# maximum input length each checkpoint supports
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class a__ ( lowerCAmelCase__ ):
    """SentencePiece-based BigBird tokenizer.

    NOTE(review): this source is machine-mangled — every method is named
    ``_snake_case`` (later definitions shadow earlier ones), several
    signatures repeat the parameter ``__lowercase`` (a ``SyntaxError``), and
    bodies reference names (``_lowerCamelCase``, ``tokens``, ``kwargs`` …)
    their signatures never declare. Comments below describe the behavior the
    code visibly intends; confirm every claim against the original source.
    """

    # Class-level tokenizer metadata. NOTE(review): all five are bound to the
    # same mangled name, so only the last assignment survives at runtime.
    __UpperCamelCase : int = VOCAB_FILES_NAMES
    __UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCamelCase : Tuple = ["input_ids", "attention_mask"]
    __UpperCamelCase : List[int] = []

    def __init__(self , __lowercase , __lowercase="<unk>" , __lowercase="<s>" , __lowercase="</s>" , __lowercase="<pad>" , __lowercase="[SEP]" , __lowercase="[MASK]" , __lowercase="[CLS]" , __lowercase = None , **__lowercase , ):
        """Load the SentencePiece model and register the special tokens."""
        # Wrap plain-string special tokens in AddedToken with explicit
        # lstrip/rstrip behavior before handing them to the base class.
        __lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else bos_token
        __lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else eos_token
        __lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else unk_token
        __lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else pad_token
        __lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else cls_token
        __lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        __lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
        __lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , sep_token=_lowerCamelCase , mask_token=_lowerCamelCase , cls_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
        __lowerCAmelCase = vocab_file
        __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(_lowerCamelCase )

    @property
    def _snake_case (self ):
        """Size of the underlying SentencePiece vocabulary."""
        return self.sp_model.get_piece_size()

    def _snake_case (self ):
        """Return the full token->id mapping, including added tokens."""
        __lowerCAmelCase = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ):
        """Drop the unpicklable SentencePiece processor before pickling."""
        __lowerCAmelCase = self.__dict__.copy()
        __lowerCAmelCase = None
        return state

    def __setstate__(self , __lowercase ):
        """Restore pickled state and re-load the SentencePiece model."""
        __lowerCAmelCase = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            __lowerCAmelCase = {}
        __lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _snake_case (self , __lowercase ):
        """Tokenize text into SentencePiece string pieces."""
        return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )

    def _snake_case (self , __lowercase ):
        """Convert a token string to its vocabulary id."""
        return self.sp_model.piece_to_id(_lowerCamelCase )

    def _snake_case (self , __lowercase ):
        """Convert a vocabulary id back to its token string."""
        __lowerCAmelCase = self.sp_model.IdToPiece(_lowerCamelCase )
        return token

    def _snake_case (self , __lowercase ):
        """Join pieces into text, emitting special tokens verbatim."""
        __lowerCAmelCase = []
        __lowerCAmelCase = ''''''
        __lowerCAmelCase = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(_lowerCamelCase ) + token
                __lowerCAmelCase = True
                __lowerCAmelCase = []
            else:
                current_sub_tokens.append(_lowerCamelCase )
                __lowerCAmelCase = False
        out_string += self.sp_model.decode(_lowerCamelCase )
        return out_string.strip()

    def _snake_case (self , __lowercase , __lowercase = False , __lowercase = None , __lowercase = True , **__lowercase , ):
        """Decode token ids to text, optionally skipping special tokens and
        normalizing tokenization spaces."""
        __lowerCAmelCase = kwargs.pop('''use_source_tokenizer''' , _lowerCamelCase )
        __lowerCAmelCase = self.convert_ids_to_tokens(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        __lowerCAmelCase = []
        __lowerCAmelCase = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
                    __lowerCAmelCase = []
                sub_texts.append(_lowerCamelCase )
            else:
                current_sub_text.append(_lowerCamelCase )
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            __lowerCAmelCase = re.sub(R''' (\[(MASK|SEP)\])''' , R'''\1''' , ''' '''.join(_lowerCamelCase ) )
        else:
            __lowerCAmelCase = ''''''.join(_lowerCamelCase )
        __lowerCAmelCase = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            __lowerCAmelCase = self.clean_up_tokenization(_lowerCamelCase )
            return clean_text
        else:
            return text

    def _snake_case (self , __lowercase , __lowercase = None ):
        """Persist the serialized SentencePiece model into a directory."""
        if not os.path.isdir(_lowerCamelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        __lowerCAmelCase = os.path.join(
            _lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Copy the on-disk model when possible; otherwise dump the in-memory proto.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , _lowerCamelCase )
        elif not os.path.isfile(self.vocab_file ):
            with open(_lowerCamelCase , '''wb''' ) as fi:
                __lowerCAmelCase = self.sp_model.serialized_model_proto()
                fi.write(_lowerCamelCase )
        return (out_vocab_file,)

    def _snake_case (self , __lowercase , __lowercase = None ):
        """Build ``[CLS] A [SEP]`` (or ``[CLS] A [SEP] B [SEP]``) id lists."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __lowerCAmelCase = [self.cls_token_id]
        __lowerCAmelCase = [self.sep_token_id]
        return cls + token_ids_a + sep + token_ids_a + sep

    def _snake_case (self , __lowercase , __lowercase = None , __lowercase = False ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(_lowerCamelCase )) + [1]
        return [1] + ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1]

    def _snake_case (self , __lowercase , __lowercase = None ):
        """Return token-type ids: 0 over sequence A, 1 over sequence B."""
        __lowerCAmelCase = [self.sep_token_id]
        __lowerCAmelCase = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 712 |
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
# fix: `rename_state_dict` below reads `KEYS_TO_MODIFY_MAPPING`, but the
# mangled source bound this mapping (and the feature extractor after it)
# to `_UpperCAmelCase`.
KEYS_TO_MODIFY_MAPPING = {
    "text_branch": "text_model",
    "audio_branch": "audio_model.audio_encoder",
    "attn": "attention.self",
    "self.proj": "output.dense",
    "attention.self_mask": "attn_mask",
    "mlp.fc1": "intermediate.dense",
    "mlp.fc2": "output.dense",
    "norm1": "layernorm_before",
    "norm2": "layernorm_after",
    "bn0": "batch_norm",
}

# Shared feature extractor used while converting CLAP checkpoints.
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def __magic_name__( checkpoint_path, enable_fusion=False):
    """Instantiate the original CLAP (HTSAT-tiny + roberta) model from a
    checkpoint and return ``(model, model_cfg)``.

    Fix: both parameters were declared ``lowerCamelCase`` (a ``SyntaxError``);
    they are restored from the body's reads of the checkpoint path and
    ``enable_fusion``.
    """
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg
def __magic_name__( state_dict):
    """Rename original CLAP state-dict keys to the transformers scheme and
    split fused audio qkv tensors into query/key/value entries.

    Fixes: the parameter was declared ``lowerCamelCase`` while the body read
    ``state_dict``; ``if "audio" and "qkv" in key`` was a truthiness bug (the
    literal ``"audio"`` is always truthy) and now tests membership for both
    substrings; the split q/k/v tensors were bound to throwaway locals
    instead of being written back under renamed keys (key names reconstructed
    from the reference CLAP conversion script — confirm against upstream).
    """
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
def __magic_name__( checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    """Convert an original CLAP checkpoint into a transformers ``ClapModel``
    and save model + config to ``pytorch_dump_folder_path``.

    Fixes: all four parameters were declared ``lowerCamelCase`` (a
    ``SyntaxError``); intermediate results were bound to throwaway mangled
    locals instead of the names the following lines read.
    NOTE(review): ``config_path`` is accepted for CLI compatibility but, as
    in the visible original, never read.
    """
    clap_model, model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # fix: parser/args were bound to the mangled name `_UpperCAmelCase`
    # while the following lines read `parser`/`args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 474 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazily-loaded import structure: submodule name -> public names it exports.
# fix: the mangled source bound this dict (and every optional extension
# below) to `__UpperCamelCase`, while `_LazyModule` at the bottom reads
# `_import_structure`; the final lazy module was likewise never installed
# into `sys.modules`.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80 |
"""simple docstring"""
def __UpperCamelCase ( SCREAMING_SNAKE_CASE = 1_00_00_00 ) -> int:
    """Return the start value below ``SCREAMING_SNAKE_CASE`` whose Collatz
    chain is longest (Project Euler problem 14).

    Fixes: the mangled original collapsed ``largest_number``, ``pre_counter``
    and the memo-dict write into one reused local name, so the memo never
    grew and ``pre_counter`` was undefined at first comparison.
    """
    largest_number = 1
    pre_counter = 1
    # chain length memo: start value -> number of terms in its Collatz chain
    counters = {1: 1}
    for inputa in range(2, SCREAMING_SNAKE_CASE):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # remaining chain length is already known — reuse it
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number


if __name__ == "__main__":
    # fix: the guard called the undefined name `solution`
    print(__UpperCamelCase(int(input().strip())))
| 163 | 0 |
'''simple docstring'''
import argparse
from collections import defaultdict
def __A ( file, class_name, test_name, correct_line, done_test ):
    """Overwrite, inside ``file``, the Nth occurrence (N tracked per test via
    ``done_test``) of the expected-value line of ``class_name.test_name``
    with ``correct_line``, preserving the line's original indentation.

    Fixes: all five parameters were declared ``_SCREAMING_SNAKE_CASE`` (a
    ``SyntaxError``); locals were bound to throwaway mangled names while the
    loop read ``lines``/``class_regex``/... — names restored from those reads.
    """
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1
    with open(file, "r") as f:
        lines = f.readlines()
    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            # remember the indentation of the line we are about to replace
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                # the statement spans several lines: drop them all
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)
    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)
def __A ( correct_filename, fail=None ):
    """Apply every correction listed in ``correct_filename`` (semicolon-
    separated ``file;class;test;line`` records), optionally restricted to
    the tests named in the failure file ``fail``.

    Fixes: both parameters were declared ``_SCREAMING_SNAKE_CASE`` (a
    ``SyntaxError``); ``defaultdict`` was built from a mangled name instead
    of ``int`` (required by the ``+= 1`` counter in the overwrite helper).
    NOTE(review): the call target ``overwrite_file`` is the first (mangled)
    ``__A`` definition above — confirm naming against the original file.
    """
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, "r") as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # Fixes: the parser/args were bound to the mangled name `lowercase` while
    # the following lines read `parser`/`args`; the entry point called the
    # undefined name `main` — the driver function above is (mangled) `__A`.
    parser = argparse.ArgumentParser()
    parser.add_argument("--correct_filename", help="filename of tests with expected result")
    parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
    args = parser.parse_args()
    __A(args.correct_filename, args.fail_filename)
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def a_ ( self ):
    """setUp: create a temp dir holding a minimal BERT vocab file and an
    EfficientNet image-processor config, used as a from_pretrained fixture.

    Fixes: every local was bound to the mangled name
    ``__SCREAMING_SNAKE_CASE`` while later lines read ``self.tmpdirname`` /
    ``self.vocab_file`` / ``self.image_processor_file``; the undefined name
    ``a__`` was used where ``IMAGE_PROCESSOR_NAME`` (imported above) and the
    json payload/handle belong.
    """
    self.tmpdirname = tempfile.mkdtemp()
    vocab_tokens = [
        "[UNK]",
        "[CLS]",
        "[SEP]",
        "[PAD]",
        "[MASK]",
        "want",
        "##want",
        "##ed",
        "wa",
        "un",
        "runn",
        "##ing",
        ",",
        "low",
        "lowest",
    ]
    self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
    with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
        vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    image_processor_map = {
        "do_resize": True,
        "size": 20,
        "do_center_crop": True,
        "crop_size": 18,
        "do_normalize": True,
        "image_mean": [0.48145466, 0.4578275, 0.40821073],
        "image_std": [0.26862954, 0.26130258, 0.27577711],
    }
    self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
    with open(self.image_processor_file, "w", encoding="utf-8") as fp:
        json.dump(image_processor_map, fp)
def a_ ( self , **kwargs ):
    """Instantiate a slow ``BertTokenizer`` from the fixture directory."""
    return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )
def a_ ( self , **kwargs ):
    """Instantiate a fast ``BertTokenizerFast`` from the fixture directory."""
    return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def a_ ( self , **kwargs ):
    """Instantiate an ``EfficientNetImageProcessor`` from the fixture directory."""
    return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
def a_ ( self ):
    """tearDown: remove the temporary fixture directory created in setUp."""
    shutil.rmtree(self.tmpdirname )
def a_ ( self ):
    """Return a list with one random PIL RGB image (CHW array moved to HWC).

    Fixes: ``np.uinta`` is not a numpy attribute (mangled ``np.uint8``);
    locals were bound to throwaway mangled names instead of the list the
    second line and the return read.
    """
    image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
    image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
    return image_inputs
def a_ ( self ):
    """Round-trip save/load of AlignProcessor with slow and fast tokenizers.

    Fixes: locals were bound to ``__SCREAMING_SNAKE_CASE`` while the asserts
    read ``processor_slow``/``processor_fast``/...; the undefined name
    ``a__`` stood where constructor arguments and expected classes belong.
    """
    tokenizer_slow = self.get_tokenizer()
    tokenizer_fast = self.get_rust_tokenizer()
    image_processor = self.get_image_processor()
    processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
    processor_slow.save_pretrained(self.tmpdirname)
    processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)
    processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
    processor_fast.save_pretrained(self.tmpdirname)
    processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)
    self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
    self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
    self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
    self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
    self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)
    self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
    self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
    self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
    self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__SCREAMING_SNAKE_CASE : List[str] = self.get_image_processor(do_normalize=a__ , padding_value=1.0 )
__SCREAMING_SNAKE_CASE : Any = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=a__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , a__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Tuple = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : List[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[Any] = image_processor(a__ , return_tensors="np" )
__SCREAMING_SNAKE_CASE : Dict = processor(images=a__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Tuple = self.get_image_processor()
__SCREAMING_SNAKE_CASE : int = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : List[Any] = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : Any = "lower newer"
__SCREAMING_SNAKE_CASE : List[str] = processor(text=a__ )
__SCREAMING_SNAKE_CASE : int = tokenizer(a__ , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : str = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : List[str] = "lower newer"
__SCREAMING_SNAKE_CASE : str = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : Union[str, Any] = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a__ ):
processor()
def a_ ( self ):
__SCREAMING_SNAKE_CASE : str = self.get_image_processor()
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Dict = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE : Dict = processor.batch_decode(a__ )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.batch_decode(a__ )
self.assertListEqual(a__ , a__ )
def a_ ( self ):
__SCREAMING_SNAKE_CASE : List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE : Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : str = AlignProcessor(tokenizer=a__ , image_processor=a__ )
__SCREAMING_SNAKE_CASE : Any = "lower newer"
__SCREAMING_SNAKE_CASE : Optional[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE : List[str] = processor(text=a__ , images=a__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 564 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase : int = {
'configuration_distilbert': [
'DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'DistilBertConfig',
'DistilBertOnnxConfig',
],
'tokenization_distilbert': ['DistilBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = ['DistilBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = [
'DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DistilBertForMaskedLM',
'DistilBertForMultipleChoice',
'DistilBertForQuestionAnswering',
'DistilBertForSequenceClassification',
'DistilBertForTokenClassification',
'DistilBertModel',
'DistilBertPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = [
'TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDistilBertForMaskedLM',
'TFDistilBertForMultipleChoice',
'TFDistilBertForQuestionAnswering',
'TFDistilBertForSequenceClassification',
'TFDistilBertForTokenClassification',
'TFDistilBertMainLayer',
'TFDistilBertModel',
'TFDistilBertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : str = [
'FlaxDistilBertForMaskedLM',
'FlaxDistilBertForMultipleChoice',
'FlaxDistilBertForQuestionAnswering',
'FlaxDistilBertForSequenceClassification',
'FlaxDistilBertForTokenClassification',
'FlaxDistilBertModel',
'FlaxDistilBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowercase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 557 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
lowercase : Union[str, Any] = ['bert-base-uncased', 'bert-base-cased']
lowercase : Any = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class lowerCamelCase__ ( tf.keras.Model):
'''simple docstring'''
def __init__( self :List[Any] , a :int ) -> int:
super().__init__()
__UpperCamelCase : Any = tokenizer
__UpperCamelCase : Union[str, Any] = AutoConfig.from_pretrained(a )
__UpperCamelCase : Dict = TFAutoModel.from_config(a )
def _lowerCamelCase ( self :List[Any] , a :Dict ) -> Any:
__UpperCamelCase : int = self.tokenizer(a )
__UpperCamelCase : int = self.bert(**a )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
def _lowerCamelCase ( self :str ) -> str:
super().setUp()
__UpperCamelCase : Dict = [
BertTokenizer.from_pretrained(a ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
__UpperCamelCase : List[Any] = [TFBertTokenizer.from_pretrained(a ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(a , use_fast_bert_tokenizer=a )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__UpperCamelCase : Dict = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
__UpperCamelCase : Dict = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def _lowerCamelCase ( self :List[Any] ) -> int:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
__UpperCamelCase : str = tokenizer(a , return_tensors="tf" , padding="longest" )
__UpperCamelCase : Any = tf_tokenizer(a )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def _lowerCamelCase ( self :Union[str, Any] ) -> Any:
for tf_tokenizer in self.tf_tokenizers:
__UpperCamelCase : List[str] = tf_tokenizer(self.paired_sentences )
__UpperCamelCase : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def _lowerCamelCase ( self :Any ) -> Optional[int]:
for tf_tokenizer in self.tf_tokenizers:
__UpperCamelCase : Optional[Any] = tf.function(a )
for test_inputs in (self.test_sentences, self.paired_sentences):
__UpperCamelCase : Tuple = tf.constant(a )
__UpperCamelCase : List[str] = compiled_tokenizer(a )
__UpperCamelCase : Any = tf_tokenizer(a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def _lowerCamelCase ( self :List[Any] ) -> Tuple:
for tf_tokenizer in self.tf_tokenizers:
__UpperCamelCase : str = ModelToSave(tokenizer=a )
__UpperCamelCase : List[Any] = tf.convert_to_tensor(self.test_sentences )
__UpperCamelCase : Optional[int] = model(a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__UpperCamelCase : Optional[Any] = Path(a ) / "saved.model"
model.save(a )
__UpperCamelCase : List[Any] = tf.keras.models.load_model(a )
__UpperCamelCase : Optional[Any] = loaded_model(a )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 ) | 557 | 1 |
'''simple docstring'''
import numpy as np
from cva import destroyAllWindows, imread, imshow, waitKey
class UpperCAmelCase :
'''simple docstring'''
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> int:
if dst_width < 0 or dst_height < 0:
raise ValueError('''Destination width/height should be > 0''' )
lowercase__ : Union[str, Any] = img
lowercase__ : List[str] = img.shape[1]
lowercase__ : Dict = img.shape[0]
lowercase__ : str = dst_width
lowercase__ : Tuple = dst_height
lowercase__ : Tuple = self.src_w / self.dst_w
lowercase__ : List[Any] = self.src_h / self.dst_h
lowercase__ : str = (
np.ones((self.dst_h, self.dst_w, 3) , np.uinta ) * 255
)
def _lowerCAmelCase( self ) -> Any:
for i in range(self.dst_h ):
for j in range(self.dst_w ):
lowercase__ : Union[str, Any] = self.img[self.get_y(__lowerCAmelCase )][self.get_x(__lowerCAmelCase )]
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
return int(self.ratio_x * x )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
return int(self.ratio_y * y )
if __name__ == "__main__":
__a: Dict = 8_00, 6_00
__a: Optional[Any] = imread("""image_data/lena.jpg""", 1)
__a: Optional[int] = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
)
waitKey(0)
destroyAllWindows()
| 705 | '''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__a: Optional[Any] = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _lowerCAmelCase( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : str = PegasusTokenizer(__lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCAmelCase( self ) -> List[str]:
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Union[str, Any]:
return ("This is a test", "This is a test")
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : List[str] = '''</s>'''
lowercase__ : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase ) , __lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase ) , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Dict:
lowercase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(__lowerCAmelCase ) , 1103 )
def _lowerCAmelCase( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Any = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
lowercase__ : Union[str, Any] = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
lowercase__ : Optional[int] = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Optional[int]:
lowercase__ : str = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
lowercase__ : Union[str, Any] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
lowercase__ : int = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
lowercase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : str = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1024
lowercase__ : Optional[Any] = '''To ensure a smooth flow of bank resolutions.'''
lowercase__ : List[Any] = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
lowercase__ : str = tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _lowerCAmelCase( self ) -> List[Any]:
lowercase__ : List[str] = ['''This is going to be way too long.''' * 150, '''short example''']
lowercase__ : Tuple = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase__ : Optional[Any] = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
lowercase__ : Dict = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 1024)
assert batch.attention_mask.shape == (2, 1024)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def _lowerCAmelCase( self ) -> int:
# fmt: off
lowercase__ : List[Any] = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( a__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def _lowerCAmelCase( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ : Optional[Any] = PegasusTokenizer(__lowerCAmelCase , offset=0 , mask_token_sent=__lowerCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _lowerCAmelCase( self ) -> Optional[Any]:
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def _lowerCAmelCase( self , **__lowerCAmelCase ) -> PegasusTokenizer:
return PegasusTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> int:
return ("This is a test", "This is a test")
def _lowerCAmelCase( self ) -> int:
lowercase__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Any = self.tokenizer_class.from_pretrained(self.tmpdirname )
lowercase__ : Tuple = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
lowercase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
lowercase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase ).input_ids[0]
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
@require_torch
def _lowerCAmelCase( self ) -> str:
lowercase__ : List[str] = ['''This is going to be way too long.''' * 1000, '''short example''']
lowercase__ : Dict = ['''not super long but more than 5 tokens''', '''tiny''']
lowercase__ : Union[str, Any] = self._large_tokenizer(__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
lowercase__ : str = self._large_tokenizer(
text_target=__lowerCAmelCase , max_length=5 , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 4096)
assert batch.attention_mask.shape == (2, 4096)
assert targets["input_ids"].shape == (2, 5)
assert len(__lowerCAmelCase ) == 2 # input_ids, attention_mask.
def _lowerCAmelCase( self ) -> Union[str, Any]:
lowercase__ : str = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
lowercase__ : Dict = self._large_tokenizer(__lowerCAmelCase ).input_ids
self.assertListEqual(
__lowerCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 428 | 0 |
def _lowerCAmelCase ( A__ ):
lowercase__ = [0] * len(A__ )
lowercase__ = []
lowercase__ = []
lowercase__ = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(A__ ) ):
if indegree[i] == 0:
queue.append(A__ )
while queue:
lowercase__ = queue.pop(0 )
cnt += 1
topo.append(A__ )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(A__ )
if cnt != len(A__ ):
print('Cycle exists' )
else:
print(A__ )
# Adjacency List of Graph
a__ : Optional[int] = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 622 |
def _lowerCAmelCase ( A__ = 600_851_475_143 ):
try:
lowercase__ = int(A__ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
lowercase__ = 2
lowercase__ = 0
if n == 2:
return 2
while n > 2:
while n % i != 0:
i += 1
lowercase__ = i
while n % i == 0:
lowercase__ = n // i
i += 1
return int(A__ )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 622 | 1 |
"""simple docstring"""
def lowercase_ ( _UpperCAmelCase ):
    """Return True if the argument is a power of two, via ``n & (n - 1) == 0``.

    Note: ``0`` also satisfies the bit identity (``0 & -1 == 0``), so ``0``
    is reported as a power of two, preserving the original behaviour.

    Raises:
        ValueError: if the argument is negative.
    """
    if _UpperCAmelCase < 0:
        raise ValueError('''number must not be negative''')
    # A positive power of two has exactly one set bit; clearing the lowest
    # set bit (n & (n - 1)) therefore yields zero.
    return _UpperCAmelCase & (_UpperCAmelCase - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 361 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
# NOTE(review): `Optional` is used in the annotation below but no `typing` import
# is visible in this module's import block — confirm the import exists upstream.
_lowerCamelCase : Optional[Any] = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class lowercase ( unittest.TestCase):
    """Pipeline tests for zero-shot classification (NLI-based candidate-label scoring)."""

    # Candidate model mappings for the PT and TF sequence-classification heads.
    __lowerCAmelCase : Optional[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    __lowerCAmelCase : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    # NOTE(review): both assignments above bind the same name `__lowerCAmelCase`,
    # while the guards below read `model_mapping` / `tf_model_mapping`, which are
    # never bound in this class body — this raises NameError when the class is
    # created. Verify against the upstream source (the attributes were likely
    # named `model_mapping` and `tf_model_mapping`).
    if model_mapping is not None:
        # Drop model types whose configs appear in _TO_SKIP (they need non-text inputs).
        __lowerCAmelCase : int = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        __lowerCAmelCase : List[Any] = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
def a_ ( self : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[Any] , _lowerCamelCase : Any ):
"""simple docstring"""
A_ : int = ZeroShotClassificationPipeline(
model=_lowerCamelCase , tokenizer=_lowerCamelCase , candidate_labels=['''polics''', '''health'''] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def a_ ( self : List[str] , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] ):
        """Exercise a zero-shot pipeline with single/multiple labels, label lists,
        comma-separated label strings, custom hypothesis templates, batched
        inputs, and the error cases for empty/invalid inputs.

        NOTE(review): this block is damaged by automated renaming — the
        signature repeats `_lowerCamelCase` (a SyntaxError), results are
        assigned to throwaway `A_` locals while the assertions read
        `_lowerCamelCase` / `outputs` / `classifier`, and `ANY(...)` /
        `assertRaises(...)` lost their real arguments (likely `str`, `float`,
        and specific exception types). Restore from the upstream source
        before relying on it; bytes are preserved here.
        """
        A_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics''' )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        # No kwarg
        A_ : Tuple = classifier('''Who are you voting for in 2020?''' , ['''politics'''] )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        A_ : List[Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics'''] )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        # Comma-separated label string should be split into two labels.
        A_ : Union[str, Any] = classifier('''Who are you voting for in 2020?''' , candidate_labels='''politics, public health''' )
        self.assertEqual(
            _lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
        # Scores over the candidate labels must sum to 1 (softmax output).
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        A_ : Tuple = classifier('''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health'''] )
        self.assertEqual(
            _lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs['''scores'''] ) ) , 1.0 )
        A_ : List[str] = classifier(
            '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''This text is about {}''' )
        self.assertEqual(_lowerCamelCase , {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase )]} )
        # https://github.com/huggingface/transformers/issues/13846
        A_ : str = classifier(['''I am happy'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            _lowerCamelCase , [
                {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
                for i in range(1 )
            ] , )
        A_ : str = classifier(['''I am happy''', '''I am sad'''] , ['''positive''', '''negative'''] )
        self.assertEqual(
            _lowerCamelCase , [
                {'''sequence''': ANY(_lowerCamelCase ), '''labels''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )], '''scores''': [ANY(_lowerCamelCase ), ANY(_lowerCamelCase )]}
                for i in range(2 )
            ] , )
        # Error cases: empty sequence, non-string sequence, empty/missing labels,
        # and hypothesis templates that cannot be formatted.
        with self.assertRaises(_lowerCamelCase ):
            classifier('''''' , candidate_labels='''politics''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier(_lowerCamelCase , candidate_labels='''politics''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels='''''' )
        with self.assertRaises(_lowerCamelCase ):
            classifier('''Who are you voting for in 2020?''' , candidate_labels=_lowerCamelCase )
        with self.assertRaises(_lowerCamelCase ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template='''Not formatting template''' , )
        with self.assertRaises(_lowerCamelCase ):
            classifier(
                '''Who are you voting for in 2020?''' , candidate_labels='''politics''' , hypothesis_template=_lowerCamelCase , )
        self.run_entailment_id(_lowerCamelCase )
    def a_ ( self : Any , _lowerCamelCase : Pipeline ):
        """Check that the pipeline's `entailment_id` is derived correctly from the
        model config's label mapping, for several labeling conventions, and that
        the original mapping is restored afterwards.

        NOTE(review): this block is damaged by automated renaming — every
        assignment lands in a throwaway `A_` local, so the label-map dicts are
        never written back to the config, and `zero_shot_classifier`, `config`
        and `original_labelaid` are unbound (the parameter is `_lowerCamelCase`).
        As written it raises NameError; restore from the upstream source
        before relying on it. Bytes are preserved here.
        """
        A_ : int = zero_shot_classifier.model.config
        A_ : Dict = config.labelaid
        A_ : Optional[int] = zero_shot_classifier.entailment_id
        # No "entailment"-style label at all -> sentinel -1.
        A_ : Optional[Any] = {'''LABEL_0''': 0, '''LABEL_1''': 1, '''LABEL_2''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        A_ : Union[str, Any] = {'''entailment''': 0, '''neutral''': 1, '''contradiction''': 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A_ : int = {'''ENTAIL''': 0, '''NON-ENTAIL''': 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        A_ : Optional[Any] = {'''ENTAIL''': 2, '''NEUTRAL''': 1, '''CONTR''': 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        # Restore the original mapping so later tests see an unmodified config.
        A_ : List[Any] = original_labelaid
        self.assertEqual(_lowerCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def a_ ( self : List[Any] ):
"""simple docstring"""
A_ : List[str] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'''Who are you voting for in 2020?''' * 1_00 , candidate_labels=['''politics''', '''public health''', '''science'''] )
@require_torch
def a_ ( self : Dict ):
"""simple docstring"""
A_ : Optional[Any] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''pt''' , )
A_ : Optional[Any] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@require_tf
def a_ ( self : Dict ):
"""simple docstring"""
A_ : List[str] = pipeline(
'''zero-shot-classification''' , model='''sshleifer/tiny-distilbert-base-cased-distilled-squad''' , framework='''tf''' , )
A_ : List[str] = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''science''', '''public health''', '''politics'''],
'''scores''': [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def a_ ( self : int ):
"""simple docstring"""
A_ : Union[str, Any] = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''pt''' )
A_ : str = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A_ : str = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def a_ ( self : Union[str, Any] ):
"""simple docstring"""
A_ : Tuple = pipeline('''zero-shot-classification''' , model='''roberta-large-mnli''' , framework='''tf''' )
A_ : str = zero_shot_classifier(
'''Who are you voting for in 2020?''' , candidate_labels=['''politics''', '''public health''', '''science'''] )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': '''Who are you voting for in 2020?''',
'''labels''': ['''politics''', '''public health''', '''science'''],
'''scores''': [0.976, 0.015, 0.009],
} , )
A_ : Tuple = zero_shot_classifier(
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'''
''' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'''
''' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'''
''' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'''
''' machine translation tasks show these models to be superior in quality while being more parallelizable'''
''' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'''
''' English-to-German translation task, improving over the existing best results, including ensembles by'''
''' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'''
''' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'''
''' fraction of the training costs of the best models from the literature. We show that the Transformer'''
''' generalizes well to other tasks by applying it successfully to English constituency parsing both with'''
''' large and limited training data.''' , candidate_labels=['''machine learning''', '''statistics''', '''translation''', '''vision'''] , multi_label=_lowerCamelCase , )
self.assertEqual(
nested_simplify(_lowerCamelCase ) , {
'''sequence''': (
'''The dominant sequence transduction models are based on complex recurrent or convolutional neural'''
''' networks in an encoder-decoder configuration. The best performing models also connect the'''
''' encoder and decoder through an attention mechanism. We propose a new simple network'''
''' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'''
''' and convolutions entirely. Experiments on two machine translation tasks show these models to be'''
''' superior in quality while being more parallelizable and requiring significantly less time to'''
''' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'''
''' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'''
''' English-to-French translation task, our model establishes a new single-model state-of-the-art'''
''' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'''
''' costs of the best models from the literature. We show that the Transformer generalizes well to'''
''' other tasks by applying it successfully to English constituency parsing both with large and'''
''' limited training data.'''
),
'''labels''': ['''translation''', '''machine learning''', '''vision''', '''statistics'''],
'''scores''': [0.817, 0.713, 0.018, 0.018],
} , )
| 361 | 1 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """`is_small_dataset` is True only when a positive size limit is set and the dataset fits under it.

    The mangled signature declared all three parameters as `__a`, which is a
    SyntaxError (duplicate argument) and hides the fixture/parametrize names.
    Parameter names restored from the parametrize IDs above and the body's
    uses of `monkeypatch`, `dataset_size` and `input_in_memory_max_size`;
    the function is renamed with a `test_` prefix so pytest collects it.
    """
    if input_in_memory_max_size != "default":
        # Override the configured limit; "default" keeps whatever config ships with.
        monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        # No size (None) or no limit (0) -> never "small".
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 20 |
"""simple docstring"""
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __UpperCAmelCase :
    """Shared helpers for DeepFloyd-IF pipeline tests: dummy component builders and save/load round-trip checks.

    NOTE(review): all four methods carry the same mangled name ``UpperCAmelCase``,
    so only the last definition is reachable on the class — they presumably had
    distinct names upstream (base / super-resolution dummy-component builders
    and two save/load tests); confirm against the upstream source.
    NOTE(review): locals are bound to the placeholder ``a__`` but later read
    under their original names (``unet``, ``prompt_embeds``, ``inputs``, ...),
    and several argument values are the bare placeholder ``a_`` — artifacts of
    automated renaming that will raise NameError at runtime. Code is left
    byte-identical here and only annotated.
    """

    def UpperCAmelCase ( self : List[str] ) -> List[str]:
        '''Build the dummy components dict (text encoder, tokenizer, unet, scheduler, watermarker) for a base IF pipeline.'''
        # Reseed before constructing each component for reproducibility.
        torch.manual_seed(0 )
        a__ : List[str] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        a__ : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        a__ : Any = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
        # NOTE(review): `unet` below is never bound (the assignment above used `a__`).
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        # NOTE(review): `thresholding=a_` is a mangling placeholder — presumably a boolean upstream; confirm.
        a__ : str = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        a__ : Optional[int] = IFWatermarker()
        # NOTE(review): the names below were the original local names before mangling.
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        '''Build dummy components for a super-resolution IF pipeline (6 input channels, extra image-noising scheduler).'''
        torch.manual_seed(0 )
        a__ : List[Any] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        a__ : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        a__ : Dict = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        torch.manual_seed(0 )
        a__ : Optional[int] = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=a_ , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        # Second scheduler: noises the low-resolution conditioning image.
        a__ : Tuple = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        a__ : Optional[Any] = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }

    def UpperCAmelCase ( self : List[Any] ) -> Tuple:
        '''Save/load round-trip with all optional components set to None: output must match within 1e-4.'''
        a__ : Dict = self.get_dummy_components()
        a__ : Any = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        a__ : Any = self.get_dummy_inputs(a_ )
        # Pull apart the dummy inputs so the prompt can be replaced by embeddings below.
        a__ : Optional[int] = inputs["prompt"]
        a__ : List[Any] = inputs["generator"]
        a__ : Optional[int] = inputs["num_inference_steps"]
        a__ : Any = inputs["output_type"]
        if "image" in inputs:
            a__ : Any = inputs["image"]
        else:
            a__ : Dict = None
        if "mask_image" in inputs:
            a__ : Optional[int] = inputs["mask_image"]
        else:
            a__ : Any = None
        if "original_image" in inputs:
            a__ : List[Any] = inputs["original_image"]
        else:
            a__ : str = None
        a__ , a__ : Optional[int] = pipe.encode_prompt(a_ )
        # inputs with prompt converted to embeddings
        a__ : Union[str, Any] = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            a__ : Dict = image
        if mask_image is not None:
            a__ : Any = mask_image
        if original_image is not None:
            a__ : Optional[int] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(a_ , a_ , a_ )
        a__ : int = pipe(**a_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a_ )
            a__ : List[str] = self.pipeline_class.from_pretrained(a_ )
            pipe_loaded.to(a_ )
            pipe_loaded.set_progress_bar_config(disable=a_ )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
            # Optional components must still be None after reloading from disk.
            for optional_component in pipe._optional_components:
                self.assertTrue(
                    getattr(a_ , a_ ) is None , F"`{optional_component}` did not stay set to None after loading." , )
        a__ : Union[str, Any] = self.get_dummy_inputs(a_ )
        a__ : str = inputs["generator"]
        a__ : Dict = inputs["num_inference_steps"]
        a__ : Optional[int] = inputs["output_type"]
        # inputs with prompt converted to embeddings
        a__ : List[Any] = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            a__ : Dict = image
        if mask_image is not None:
            a__ : Any = mask_image
        if original_image is not None:
            a__ : Dict = original_image
        a__ : Optional[Any] = pipe_loaded(**a_ )[0]
        # Max absolute element-wise difference between original and reloaded output.
        a__ : int = np.abs(to_np(a_ ) - to_np(a_ ) ).max()
        self.assertLess(a_ , 1E-4 )

    def UpperCAmelCase ( self : int ) -> Any:
        '''Plain save_pretrained/from_pretrained round-trip: reloaded pipeline output must match within 1e-4.'''
        a__ : Dict = self.get_dummy_components()
        a__ : Dict = self.pipeline_class(**a_ )
        pipe.to(a_ )
        pipe.set_progress_bar_config(disable=a_ )
        a__ : List[str] = self.get_dummy_inputs(a_ )
        a__ : Dict = pipe(**a_ )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(a_ )
            a__ : str = self.pipeline_class.from_pretrained(a_ )
            pipe_loaded.to(a_ )
            pipe_loaded.set_progress_bar_config(disable=a_ )
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
        a__ : Optional[int] = self.get_dummy_inputs(a_ )
        a__ : Optional[int] = pipe_loaded(**a_ )[0]
        a__ : List[Any] = np.abs(to_np(a_ ) - to_np(a_ ) ).max()
        self.assertLess(a_ , 1E-4 )
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# Build a truly tiny FSMT checkpoint for tests. Every assignment below was
# mangled to the single name `UpperCamelCase_` while later statements still
# read the original names (`vocab`, `merges`, `tokenizer`, ...), which raised
# NameError — the original bindings are restored here.
mname_tiny = 'tiny-wmt19-en-ru'

# Build

# borrowed from a test
vocab = [
    'l',
    'o',
    'w',
    'e',
    'r',
    's',
    't',
    'i',
    'd',
    'n',
    'w</w>',
    'r</w>',
    't</w>',
    'lo',
    'low',
    'er</w>',
    'low</w>',
    'lowest</w>',
    'newer</w>',
    'wider</w>',
    '<unk>',
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES['src_vocab_file']
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES['tgt_vocab_file']
    merges_file = build_dir / VOCAB_FILES_NAMES['merges_file']
    with open(src_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, 'w') as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, 'w') as fp:
        fp.write('\n'.join(merges))

    tokenizer = FSMTTokenizer(
        langs=['en', 'ru'],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=['ru', 'en'],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")

# Test
batch = tokenizer(['Making tiny model'], return_tensors='pt')
outputs = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")

# Upload
# transformers-cli upload tiny-wmt19-en-ru
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
    "compression_format, is_archive" , [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ] , )
def test_base_extractors(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """Each concrete extractor must report its format extractable and round-trip the text file.

    The mangled signature declared twelve parameters all named `UpperCAmelCase`
    (a SyntaxError); names restored from the body's uses — the `*_file`
    parameters are pytest fixtures (mangled fixture names such as `bza_file`
    kept to match this file's conftest), plus `tmp_path` and the parametrized
    `compression_format` / `is_archive`.
    """
    input_paths_and_base_extractors = {
        "7z": (seven_zip_file, SevenZipExtractor),
        "bz2": (bza_file, BzipaExtractor),
        "gzip": (gz_file, GzipExtractor),
        "lz4": (lza_file, LzaExtractor),
        "tar": (tar_file, TarExtractor),
        "xz": (xz_file, XzExtractor),
        "zip": (zip_file, ZipExtractor),
        "zstd": (zstd_file, ZstdExtractor),
    }
    input_path, base_extractor = input_paths_and_base_extractors[compression_format]
    if input_path is None:
        # Fixture unavailable: the optional compression library is not installed.
        reason = F'''for \'{compression_format}\' compression_format, '''
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    assert base_extractor.is_extractable(input_path )
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    base_extractor.extract(input_path , output_path )
    if is_archive:
        # Archive formats extract to a directory containing the original file.
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
    "compression_format, is_archive" , [
        ("7z", True),
        ("bz2", False),
        ("gzip", False),
        ("lz4", False),
        ("tar", True),
        ("xz", False),
        ("zip", True),
        ("zstd", False),
    ] , )
def test_extractor(
    compression_format,
    is_archive,
    bza_file,
    gz_file,
    lza_file,
    seven_zip_file,
    tar_file,
    xz_file,
    zip_file,
    zstd_file,
    tmp_path,
    text_file,
):
    """The generic `Extractor` must infer the right format and round-trip the text file.

    Signature restored from the mangled twelve-fold duplicate `UpperCAmelCase`
    parameters (a SyntaxError), mirroring `test_base_extractors` above.
    """
    input_paths = {
        "7z": seven_zip_file,
        "bz2": bza_file,
        "gzip": gz_file,
        "lz4": lza_file,
        "tar": tar_file,
        "xz": xz_file,
        "zip": zip_file,
        "zstd": zstd_file,
    }
    input_path = input_paths[compression_format]
    if input_path is None:
        # Fixture unavailable: the optional compression library is not installed.
        reason = F'''for \'{compression_format}\' compression_format, '''
        if compression_format == "7z":
            reason += require_pyazr.kwargs["reason"]
        elif compression_format == "lz4":
            reason += require_lza.kwargs["reason"]
        elif compression_format == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason )
    extractor_format = Extractor.infer_extractor_format(input_path )
    assert extractor_format is not None
    output_path = tmp_path / ("extracted" if is_archive else "extracted.txt")
    Extractor.extract(input_path , extractor_format , output_path )
    if is_archive:
        assert output_path.is_dir()
        for file_path in output_path.iterdir():
            assert file_path.name == text_file.name
            extracted_file_content = file_path.read_text(encoding="utf-8" )
    else:
        extracted_file_content = output_path.read_text(encoding="utf-8" )
    expected_file_content = text_file.read_text(encoding="utf-8" )
    assert extracted_file_content == expected_file_content
@pytest.fixture
def tar_file_with_dot_dot(tmp_path, text_file):
    """Fixture: a tar archive whose member path escapes the target dir via `..`.

    The fixture name is restored from the parametrize id and lookup dict in
    `test_tar_extract_insecure_files` below; the mangled signature had two
    parameters both named `UpperCAmelCase` (a SyntaxError).
    """
    import tarfile

    directory = tmp_path / "data_dot_dot"
    directory.mkdir()
    path = directory / "tar_file_with_dot_dot.tar"
    with tarfile.TarFile(path , "w" ) as f:
        # Store the file under "../<name>" so naive extraction would escape.
        f.add(text_file , arcname=os.path.join(".." , text_file.name ) )
    return path
@pytest.fixture
def tar_file_with_sym_link(tmp_path):
    """Fixture: a tar archive containing a symlink pointing outside its directory.

    Name restored from the lookup dict in `test_tar_extract_insecure_files`;
    the body reads `tmp_path`, so that must be the parameter. The mangled
    `target_is_directory=UpperCAmelCase` placeholder is restored to True —
    the link target ".." is a directory.
    """
    import tarfile

    directory = tmp_path / "data_sym_link"
    directory.mkdir()
    path = directory / "tar_file_with_sym_link.tar"
    os.symlink(".." , directory / "subdir" , target_is_directory=True )
    with tarfile.TarFile(path , "w" ) as f:
        f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
    return path
@pytest.mark.parametrize(
    "insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def test_tar_extract_insecure_files(
    insecure_tar_file, error_log, tar_file_with_dot_dot, tar_file_with_sym_link, tmp_path, caplog
):
    """Extracting a malicious tar must log an ERROR mentioning the offending member.

    The mangled signature declared six parameters all named `UpperCAmelCase`
    (a SyntaxError); names restored from the parametrize ids, the fixture
    lookup dict and the body's uses of `tmp_path` / `caplog`.
    """
    insecure_tar_files = {
        "tar_file_with_dot_dot": tar_file_with_dot_dot,
        "tar_file_with_sym_link": tar_file_with_sym_link,
    }
    insecure_tar_file_path = insecure_tar_files[insecure_tar_file]
    extracted_path = tmp_path / "extracted"
    TarExtractor.extract(insecure_tar_file_path , extracted_path )
    assert caplog.text
    for record in caplog.records:
        assert record.levelname == "ERROR"
        assert error_log in record.msg
def test_is_zipfile_false_positive(tmpdir):
    """A PNG that trips `zipfile.is_zipfile` must NOT be considered extractable by `ZipExtractor`.

    The mangled parameter name is restored to `tmpdir` (read in the body), and
    the PNG payload — originally assigned to the placeholder `a_` — is bound
    to a name so it can actually be written out.
    """
    not_a_zip_file = tmpdir / "not_a_zip_file"
    # From: https://github.com/python/cpython/pull/5053
    data = (
        B"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"
        B"\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"
        B"DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"
        B"\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"
    )
    with not_a_zip_file.open("wb" ) as f:
        f.write(data )
    assert zipfile.is_zipfile(str(not_a_zip_file ) ) # is a false positive for `zipfile`
    assert not ZipExtractor.is_extractable(not_a_zip_file ) # but we're right
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def lowerCAmelCase_(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn: bool = True, stacklevel: int = 2):
    """Emit a deprecation warning for each ``(attribute, version_name, message)`` tuple in *args*.

    Optionally pops deprecated entries from *take_from* (a kwargs dict) or reads
    them from an object's attribute, returning the collected values (none, one,
    or a tuple). Raises ValueError when the current library version has already
    reached *version_name*, and TypeError for leftover unexpected kwargs.

    The mangled signature used the duplicate name ``lowercase_`` for every
    parameter (a SyntaxError); parameter names and the mangled-away values
    (``tuple`` type check, ``FutureWarning`` category, ``stacklevel`` and the
    ``{filename}`` placeholder in the TypeError message) are restored here.
    """
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    # Allow a single bare tuple as well as several tuples.
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"""
                F""" version {__version__} is >= {version_name}""" )
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}."""
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}."""
        elif deprecated_kwargs is None:
            warning = F"""`{attribute}` is deprecated and will be removed in version {version_name}."""
        if warning is not None:
            warning = warning + ''' ''' if standard_warn else ''''''
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        # Leftover keys were never declared deprecated: report the caller's location.
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
| 661 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
__SCREAMING_SNAKE_CASE : Optional[int] = '''
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
'''
__SCREAMING_SNAKE_CASE : List[str] = '''\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
'''
__SCREAMING_SNAKE_CASE : Optional[Any] = '''
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
sources: list of source sentences where each sentence should be a string.
predictions: list of predicted sentences where each sentence should be a string.
references: list of lists of reference sentences where each sentence should be a string.
Returns:
sari: sari score
sacrebleu: sacrebleu score
exact: exact score
Examples:
>>> sources=["About 95 species are currently accepted ."]
>>> predictions=["About 95 you now get in ."]
>>> references=[["About 95 species are currently known ."]]
>>> wiki_split = datasets.load_metric("wiki_split")
>>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
>>> print(results)
{\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}
'''
def normalize_answer(s):
    """Lowercase *s* and strip punctuation, English articles and extra whitespace.

    Renamed from the mangled duplicate `lowerCAmelCase_` — `compute_exact`
    below calls `normalize_answer`, so this must be its name. The inner
    helpers read `text`, so their mangled parameters are restored too.
    """
    def remove_articles(text):
        regex = re.compile(r'''\b(a|an|the)\b''' , re.UNICODE )
        return re.sub(regex , ''' ''' , text )

    def white_space_fix(text):
        # Collapse any run of whitespace to a single space.
        return " ".join(text.split() )

    def remove_punc(text):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def compute_exact(a_gold, a_pred):
    """Return 1 if the two strings are equal after `normalize_answer`, else 0.

    Renamed from the mangled `lowerCAmelCase_` (called as `compute_exact` in
    `compute_em` below); the mangled signature duplicated the parameter name.
    """
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em(predictions, references):
    """Corpus-level exact-match: percentage of predictions matching any of their references.

    Parameter names restored from the body's loop variables (`pred`, `refs`);
    the mangled version passed the duplicate placeholder `lowercase_` everywhere.
    """
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 1_00
def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Per-n-gram-order SARI components for one sentence.

    Args:
        sgrams: source-sentence n-grams.
        cgrams: candidate-sentence n-grams.
        rgramslist: list (one entry per reference) of reference n-gram lists.
        numref: number of references (source/candidate counts are scaled by it).

    Returns:
        (keepscore, delscore_precision, addscore) — F1 of kept n-grams,
        precision of deleted n-grams, and F1 of added n-grams.

    All left-hand sides were mangled to `_lowerCamelCase` while the right-hand
    sides still read the original counter names; the original bindings are
    restored here (name evidenced by the `SARIngram(...)` calls in `SARIsent`).
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall )
    sgramcounter = Counter(sgrams )
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams )
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref
    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscorea = 0
    keeptmpscorea_recall = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscorea_recall += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep ) > 0:
        keepscore_precision = keeptmpscorea / len(keepgramcounter_rep )
    if len(keepgramcounterall_rep ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscorea_recall / sum(keepgramcounterall_rep.values() )
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscorea = 0
    deltmpscorea_all = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscorea_all += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep ) > 0:
        delscore_precision = deltmpscorea / len(delgramcounter_rep )
    # ADDITION
    addgramcounter = set(cgrams ) - set(sgrams )
    addgramcountergood = set(addgramcounter ) & set(rgramsall )
    addgramcounterall = set(rgramsall ) - set(sgrams )
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter ) > 0:
        addscore_precision = addtmpscore / len(addgramcounter )
    if len(addgramcounterall ) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall )
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI: averages keep/delete/add scores over 1- to 4-grams.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate (simplified) sentence.
        rsents: list of reference sentences.

    Returns the scalar SARI score for this sentence (0..1 range).

    The mangled version bound every n-gram list to `_lowerCamelCase`/`a__`
    placeholders; the per-order lists and the four `SARIngram` unpackings are
    restored here (name evidenced by the call in `compute_sari` below).
    """
    numref = len(rsents )
    s1grams = ssent.split(''' ''' )
    c1grams = csent.split(''' ''' )
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''' )
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams )
        for i in range(0 , len(r1grams ) - 1 ):
            if i < len(r1grams ) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram )
            if i < len(r1grams ) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram )
            if i < len(r1grams ) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram )
        r2gramslist.append(r2grams )
        r3gramslist.append(r3grams )
        r4gramslist.append(r4grams )
    for i in range(0 , len(s1grams ) - 1 ):
        if i < len(s1grams ) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram )
        if i < len(s1grams ) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram )
        if i < len(s1grams ) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram )
    for i in range(0 , len(c1grams ) - 1 ):
        if i < len(c1grams ) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram )
        if i < len(c1grams ) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram )
        if i < len(c1grams ) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram )
    (keepascore, delascore, addascore) = SARIngram(s1grams , c1grams , r1gramslist , numref )
    (keepbscore, delbscore, addbscore) = SARIngram(s2grams , c2grams , r2gramslist , numref )
    (keepcscore, delcscore, addcscore) = SARIngram(s3grams , c3grams , r3gramslist , numref )
    (keepdscore, deldscore, adddscore) = SARIngram(s4grams , c4grams , r4gramslist , numref )
    avgkeepscore = sum([keepascore, keepbscore, keepcscore, keepdscore] ) / 4
    avgdelscore = sum([delascore, delbscore, delcscore, deldscore] ) / 4
    avgaddscore = sum([addascore, addbscore, addcscore, adddscore] ) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def lowerCAmelCase_( sentence: str , lowercase: bool = True , tokenizer: str = "13a" , return_str: bool = True ):
    """Normalize (and optionally tokenize) a sentence for SARI / BLEU scoring.

    Args:
        sentence: raw input sentence.
        lowercase: lowercase the sentence before tokenizing.
        tokenizer: one of "13a", "intl" (sacrebleu), "moses", "penn"
            (sacremoses); any other value leaves the sentence untokenized.
        return_str: return a single string if True, else the whitespace-split
            token list.

    NOTE(review): the original signature repeated one parameter name four
    times (a SyntaxError) while the body used `sentence` / `lowercase` /
    `tokenizer` / `return_str`; the parameter names were restored from the body.
    """
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu v2 moved the tokenizers behind a factory function.
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def lowerCAmelCase_( sources , predictions , references ):
    """Corpus-level SARI: average of per-sentence SARI scores, scaled to 0-100.

    Args:
        sources: original (complex) sentences.
        predictions: system outputs, aligned with ``sources``.
        references: list of reference lists, one list per example.

    Raises:
        ValueError: if the three lists do not all have the same length.

    NOTE(review): `SARIsent` and `normalize` are expected to be defined
    elsewhere in this module; parameter names were restored from the body
    (the original signature repeated one name three times — a SyntaxError).
    """
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError('''Sources length must match predictions and references lengths.''' )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        # Normalize every side so tokenization is consistent before scoring.
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 1_00 * sari_score
def lowerCAmelCase_( predictions , references , smooth_method="exp" , smooth_value=None , force=False , lowercase=False , use_effective_order=False , ):
    """Corpus BLEU via sacrebleu; returns the float score.

    Args:
        predictions: system outputs.
        references: list of reference lists, one list per example; every
            example must have the same number of references.
        smooth_method / smooth_value / force / lowercase / use_effective_order:
            forwarded to ``sacrebleu.corpus_bleu``.

    Raises:
        ValueError: if the per-example reference counts are ragged.

    NOTE(review): parameter names were restored from the body (the original
    signature repeated one name — a SyntaxError).
    """
    references_per_prediction = len(references[0] )
    if any(len(refs ) != references_per_prediction for refs in references ):
        raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
    # sacrebleu expects one list per reference *position*, not per example.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction )]
    output = sacrebleu.corpus_bleu(
        predictions ,
        transformed_references ,
        smooth_method=smooth_method ,
        smooth_value=smooth_value ,
        force=force ,
        lowercase=lowercase ,
        use_effective_order=use_effective_order ,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCamelCase_( datasets.Metric ):
    """Text-simplification metric reporting SARI, SacreBLEU and exact match.

    NOTE(review): both methods below share one (garbled) name, so only the
    second survives on the class; upstream they would be `_info` / `_compute`.
    """

    def snake_case__ ( self ):
        """Return the MetricInfo describing inputs, citation and reference links."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' , id='''sequence''' ),
                    '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
                } ) ,
            codebase_urls=[
                '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
                '''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
                '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
                '''https://github.com/mjpost/sacreBLEU''',
            ] ,
            reference_urls=[
                '''https://www.aclweb.org/anthology/Q16-1029.pdf''',
                '''https://github.com/mjpost/sacreBLEU''',
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ] , )

    def snake_case__ ( self , sources , predictions , references ):
        """Compute all three sub-metrics for one batch of examples.

        NOTE(review): the original signature repeated one parameter name three
        times (a SyntaxError); names restored from the keyword arguments the
        body passes to `compute_sari`.
        """
        result = {}
        result.update({'''sari''': compute_sari(sources=sources , predictions=predictions , references=references )} )
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions , references=references )} )
        result.update({'''exact''': compute_em(predictions=predictions , references=references )} )
        return result
| 661 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A ( unittest.TestCase ):
    """End-to-end tests for `OwlViTProcessor`: construction, save/load
    round-trips, tokenizer / image-processor delegation, and decoding.

    NOTE(review): in the original every method shared one garbled name
    (shadowing all but the last) and many local references were mangled.
    Method names below are restored from the call sites (`self.get_tokenizer()`
    etc.) and unittest conventions (`setUp` / `tearDown`) — confirm upstream.
    """

    def setUp(self):
        """Write a toy CLIP vocab/merges pair and an OwlViT image-processor config to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow CLIP tokenizer built from the toy vocab."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) CLIP tokenizer built from the toy vocab."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        """OwlViT image processor built from the toy config."""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-first array moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        """Saving then reloading a processor preserves tokenizer vocab and image-processor config."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs passed to `from_pretrained` override the saved components."""
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        """`processor(images=...)` matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """`processor(text=...)` matches calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        """Text + image inputs produce the combined key set; empty call raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        """A flat list of query strings is padded to a fixed sequence length."""
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        """Nested query lists are flattened to (batch * max_queries, seq_len)."""
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        """Exact token ids for two known query strings."""
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        """Image-guided queries produce `query_pixel_values` alongside `pixel_values`."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """`processor.batch_decode` delegates to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 710 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def a__ ( model_card_dir , src_lang , tgt_lang ):
    """Generate the FSMT WMT19 model card README.md for one language pair.

    Args:
        model_card_dir: directory the README.md is written into (created if missing).
        src_lang: source language code, e.g. "en".
        tgt_lang: target language code, e.g. "ru".

    NOTE(review): the original signature repeated one parameter name three
    times (a SyntaxError); names restored from the f-string references in the
    body.
    """
    # Example sentences shown in the "How to use" snippet, keyed by language.
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f'{src_lang}-{tgt_lang}'
    readme = f'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n'
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(f'Generating {path}' )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / """model_cards"""

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # model names look like "wmt19-<src>-<tgt>"; the leading "wmt19" is unused.
    prefix, src_lang, tgt_lang = model_name.split("""-""")
    model_card_dir = model_cards_dir / """facebook""" / model_name
    # NOTE(review): the card-writer function is named `a__` in this module.
    a__(model_card_dir, src_lang, tgt_lang)
| 550 | 0 |
from math import factorial
def _A ( lowerCAmelCase_ : int = 20 ):
"""simple docstring"""
lowerCAmelCase__ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowerCAmelCase__ = n // 2
return int(factorial(lowerCAmelCase_ ) / (factorial(lowerCAmelCase_ ) * factorial(n - k )) )
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        # No CLI argument: use the default grid size of 20.
        print(_A(20))
    else:
        try:
            grid_size = int(sys.argv[1])
            print(_A(grid_size))
        except ValueError:
            print('Invalid entry - please enter a number.')
| 61 |
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def lowercase ( __magic_name__ ):
    '''Exact Gaussian Error Linear Unit: x * Phi(x) computed with erf.

    NOTE(review): the original body assigned to garbled names and then read
    `x` / `cdf`; the locals are restored here.
    '''
    x = tf.convert_to_tensor(__magic_name__ )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))

    return x * cdf
def lowercase ( __magic_name__ ):
    '''Tanh-approximated GELU (the "new" variant from the GPT-2 codebase).'''
    x = tf.convert_to_tensor(__magic_name__ )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.044715 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))

    return x * cdf
def lowercase ( __magic_name__ ):
    '''Mish activation: x * tanh(softplus(x)).'''
    x = tf.convert_to_tensor(__magic_name__ )

    return x * tf.tanh(tf.math.softplus(x ) )
def lowercase ( __magic_name__ ):
    '''Fast GELU approximation using a single fused tanh expression.'''
    x = tf.convert_to_tensor(__magic_name__ )
    coeff_cubic = tf.cast(0.044715 , x.dtype )
    coeff_sqrt = tf.cast(0.7978845608 , x.dtype )  # sqrt(2 / pi)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff_sqrt * (1.0 + coeff_cubic * x * x) ))
def lowercase ( __magic_name__ ):
    '''Sigmoid-based "quick" GELU approximation: x * sigmoid(1.702 * x).'''
    x = tf.convert_to_tensor(__magic_name__ )
    coeff = tf.cast(1.702 , x.dtype )

    return x * tf.math.sigmoid(coeff * x )
def lowercase ( __magic_name__ ):
    '''Clip the exact GELU activation to the range [-10, 10] ("gelu_10").

    NOTE(review): `_gelu` is not defined under that name in this file (the
    activation helpers above are all named `lowercase`) — confirm the intended
    reference before use.
    '''
    return tf.clip_by_value(_gelu(__magic_name__ ) , -10 , 10 )
def lowercase ( __magic_name__ , axis=-1 ):
    '''Gated Linear Unit: split the input in two halves along `axis` and gate
    the first half with the sigmoid of the second.

    NOTE(review): the original signature repeated one parameter name twice
    (a SyntaxError); the split-axis parameter is restored as `axis`.
    '''
    a , b = tf.split(__magic_name__ , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# On TF >= 2.4 prefer the built-in Keras GELU implementations.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def lowercase ( __magic_name__ ):
        '''Wrapper around the native Keras GELU.

        NOTE(review): upstream passes `approximate=True` here; this version
        passes the input itself as `approximate` — looks garbled, confirm.
        '''
        return tf.keras.activations.gelu(__magic_name__ , approximate=__magic_name__ )

    # NOTE(review): `approximate_gelu_wrap` is not defined under that name in
    # this file, and both assignments target the same name `a` — confirm intent.
    a : Tuple = tf.keras.activations.gelu
    a : Dict = approximate_gelu_wrap
else:
    # Fallback implementations for older TensorFlow.
    # NOTE(review): `_gelu` / `_gelu_new` are not defined under these names here.
    a : List[str] = _gelu
    a : List[Any] = _gelu_new
# Map from config activation-name strings to the activation callables above.
# NOTE(review): every non-Keras value here (gelu, gelu_aa, gelu_fast, ...) is
# undefined under these names in this file — the activation definitions above
# are all named `lowercase`. Confirm against the un-garbled original.
a : Optional[int] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( __magic_name__ ):
    '''Look up a TF activation callable by its configuration-string name.

    Raises:
        KeyError: if the name is not in the activation mapping.

    NOTE(review): the mapping is defined above under the garbled name `a`,
    while this body refers to `ACTaFN` — confirm the intended module-level
    name before use.
    '''
    if __magic_name__ in ACTaFN:
        return ACTaFN[__magic_name__]
    raise KeyError(F"function {__magic_name__} not found in ACT2FN mapping {list(ACTaFN.keys() )}" )
| 679 | 0 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE ):
    """Return True if the number is "bouncy": its digits are neither
    monotonically non-decreasing nor monotonically non-increasing.

    Raises:
        ValueError: if the argument is not an int.
    """
    # Original compared the value against itself (`isinstance(x, x)`), which
    # is not a type check at all; validate against `int` instead.
    if not isinstance(SCREAMING_SNAKE_CASE , int ):
        raise ValueError('check_bouncy() accepts only integer arguments' )
    str_n = str(SCREAMING_SNAKE_CASE )
    sorted_str_n = ''.join(sorted(str_n ) )
    # Sorted ascending == "increasing" number; sorted descending == "decreasing".
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE = 99 ):
    """Return the least number at which the proportion of bouncy numbers
    first reaches the given percentage (Project Euler 112).

    Raises:
        ValueError: if the percentage is not strictly between 0 and 100.
    """
    if not 0 < SCREAMING_SNAKE_CASE < 100:
        raise ValueError('solution() only accepts values from 0 to 100' )

    def _is_bouncy(num: int) -> bool:
        # Inlined bouncy test: digits neither fully ascending nor descending.
        # (The module-level checker is unreachable here — its name is shadowed.)
        digits = str(num)
        ascending = ''.join(sorted(digits))
        return ascending != digits and ascending[::-1] != digits

    bouncy_num = 0
    num = 1

    while True:
        if _is_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= SCREAMING_SNAKE_CASE:
            return num
        num += 1
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # 99% is the threshold asked for by Project Euler problem 112.
    # NOTE(review): the solver function in this module is named
    # `SCREAMING_SNAKE_CASE__`; the original called an undefined `solution`.
    print(f"""{SCREAMING_SNAKE_CASE__(99)}""")
| 707 |
'''simple docstring'''
from PIL import Image
def SCREAMING_SNAKE_CASE__ ( img , level ):
    """Return a copy of `img` with its contrast adjusted by `level`.

    Args:
        img: a PIL ``Image`` (anything exposing ``point(fn)``).
        level: contrast level in [-255, 255]; positive increases contrast.

    NOTE(review): the original signature repeated one parameter name twice
    (a SyntaxError); names restored from the body's references.
    """
    # Standard linear contrast factor; maps `level` to a slope around mid-gray.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Stretch each channel value away from (or toward) 128.
        return int(128 + factor * (c - 128) )

    return img.point(contrast )
if __name__ == "__main__":
    # Load image
    with Image.open("""image_data/lena.jpg""") as img:
        # Change contrast to 170
        # NOTE(review): the original assigned the result over the contrast
        # function's own name and then used an undefined `cont_img`.
        cont_img = SCREAMING_SNAKE_CASE__(img, 1_70)
        cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
| 233 | 0 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class __lowerCAmelCase ( ProcessorMixin ):
    """Donut-style processor that wraps an auto image processor and an auto
    tokenizer behind a single ``__call__`` and can convert generated token
    sequences back into JSON.

    NOTE(review): the original class attributes and several method names were
    garbled (all bound to one identifier, a duplicate-argument SyntaxError in
    the signatures, and references to undefined locals). The `ProcessorMixin`
    attribute names and the method names below are restored from the mixin
    contract and from the recursive ``self.tokenajson(...)`` call — confirm
    against the un-garbled original.
    """

    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Build the processor; accepts the deprecated `feature_extractor` kwarg."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__( self , *args , **kwargs ):
        """Dispatch images to the image processor and text to the tokenizer.

        Returns the image features, the text encodings, or the image features
        with token ids attached as ``labels`` when both are given.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        images = kwargs.pop("images" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process." )

        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @contextmanager
    def as_target_processor( self ):
        """Temporarily route ``__call__`` to the tokenizer (deprecated)."""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson( self , tokens , is_inner_value=False , added_vocab=None ):
        """Convert a generated string like ``<s_key>value</s_key>`` into JSON.

        Unclosed keys are dropped; ``<sep/>``-separated siblings become lists.
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(R"<s_(.*?)>" , tokens , re.IGNORECASE )
            if start_token is None:
                break
            key = start_token.group(1 )
            end_token = re.search(RF'''</s_{key}>''' , tokens , re.IGNORECASE )
            start_token = start_token.group()
            if end_token is None:
                # No closing tag: drop the opening tag and keep scanning.
                tokens = tokens.replace(start_token , "" )
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token )
                end_token_escaped = re.escape(end_token )
                content = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , tokens , re.IGNORECASE )
                if content is not None:
                    content = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
                        if value:
                            if len(value ) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>" ):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf )
                        if len(output[key] ) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )

        if len(output ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 283 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
snake_case = logging.get_logger(__name__)
def lowerCamelCase__ ( lowercase ):
    """Build a `MobileViTConfig` matching the named original checkpoint.

    Args:
        lowercase: checkpoint name, e.g. "mobilevit_s" or "deeplabv3_mobilevit_xs".

    NOTE(review): the original body assigned architecture values to throwaway
    locals and never set them on the config; the `config.<attr>` targets and
    the `int(k)` key conversion are restored from the upstream conversion
    script — confirm against upstream.
    """
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in lowercase:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in lowercase:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in lowercase:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if lowercase.startswith("deeplabv3_" ):
        # Segmentation variants: larger input, PASCAL VOC label set.
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        # Classification variants: ImageNet-1k label set.
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}

    return config
def rename_key(name, base_model=False):
    """Map an original MobileViT state-dict key to its HF Transformers name.

    Args:
        name: Key from the original checkpoint's state dict.
        base_model: If True, keys are not prefixed with ``"mobilevit."``.

    Returns:
        The renamed key.
    """
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                # assumes these downsampling renames only apply after an .i.j.
                # match, per the upstream conversion script — TODO confirm
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
def convert_state_dict(orig_state_dict, model, base_model=False):
    """Rewrite an original MobileViT state dict to HF naming, in place.

    Fused ``qkv`` projections are split into separate query/key/value tensors
    (rows/entries ordered [query; key; value]); all other keys are renamed via
    :func:`rename_key`.

    Args:
        orig_state_dict: State dict loaded from the original checkpoint.
        model: Target HF model, used to look up per-layer attention head sizes.
        base_model: If True, keys are not prefixed with ``"mobilevit."``.

    Returns:
        The converted state dict (same dict object, mutated).
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            # "layer_N..." -> zero-based encoder layer index
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
def prepare_img():
    """Download the standard COCO cats test image used to verify model outputs."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Fix: `stream` was an undefined name; streaming must be enabled to read .raw.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original MobileViT checkpoint weights into our MobileViT
    structure, verify the outputs on a test image, and save (optionally push) the result.

    Args:
        mobilevit_name: Variant to convert, e.g. ``"mobilevit_s"``.
        checkpoint_path: Path to the original state dict (``.pt`` file).
        pytorch_dump_folder_path: Output directory for the converted model.
        push_to_hub: If True, also push model and image processor to the Hub.

    Raises:
        ValueError: If ``mobilevit_name`` is not a known variant.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    # Fix: the parser was bound to a mangled name (`snake_case`) while every
    # subsequent call used `parser`, and the parsed args were never bound to `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 62 | 0 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Fix: three distinct module constants were all assigned to the same mangled
# name, while the code below reads PREFIX and MODEL_MAPPING.
# Base URL hosting the original OpenAI Jukebox checkpoints.
PREFIX = "https://openaipublic.azureedge.net/jukebox/models/"
# Checkpoint shards (vqvae + the three prior levels) needed per model.
MODEL_MAPPING = {
    "jukebox-1b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "1b_lyrics/prior_level_2.pth.tar",
    ],
    "jukebox-5b-lyrics": [
        "5b/vqvae.pth.tar",
        "5b/prior_level_0.pth.tar",
        "5b/prior_level_1.pth.tar",
        "5b_lyrics/prior_level_2.pth.tar",
    ],
}
def replace_key(key):
    """Token-level renaming of an original Jukebox state-dict key to HF naming.

    Args:
        key: Key from the original checkpoint's state dict.

    Returns:
        The renamed key.
    """
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    """Rename every key of ``state_dict`` to the HF Jukebox naming scheme.

    Structural encoder/decoder/conditioner indices are rewritten with regexes,
    then :func:`replace_key` handles the remaining token-level renames. Keys
    whose tensor shape does not match the target model keep their original name
    so the mismatch stays visible. ``mapping`` is mutated in place with
    new-key -> original-key entries.

    Args:
        state_dict: Original checkpoint weights for one sub-module.
        model_state_dict: State dict of the target ``JukeboxModel``.
        key_prefix: Target sub-module prefix, e.g. ``"vqvae"`` or ``"priors.2"``.
        mapping: Dict recording the renaming performed.

    Returns:
        A new dict with renamed keys and the original tensors.
    """
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")

        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    """Download the original OpenAI Jukebox shards, convert them, and save.

    Args:
        model_name: Key into ``MODEL_MAPPING`` (e.g. ``"jukebox-5b-lyrics"``).
        pytorch_dump_folder_path: Directory for downloads and the converted model.

    Returns:
        The list of converted prior state dicts (vqvae dict already consumed).
    """
    # Download any missing checkpoint shards.
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)

    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        # Expand the original short suffixes (.b/.w) to bias/weight names.
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith(".b"):
                new_dic[k.replace("b", "bias")] = old_dic[k]
            elif k.endswith(".w"):
                new_dic[k.replace("w", "weight")] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks.", ".model.")] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        # Shard 0 is the VQ-VAE; shards 1..3 map to priors.2 .. priors.0.
        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)

    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
    # Fix: the parser was bound to a mangled name while the add_argument calls
    # used `parser`, and the parsed args were never bound.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="jukebox-5b-lyrics",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="jukebox-5b-lyrics-converted",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    args = parser.parse_args()
    convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 460 |
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
# Force deterministic torch kernels so the hard-coded expected slices below are reproducible.
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    """ModelTesterMixin-based unit tests for `PriorTransformer`."""

    # Fix: both class attributes were assigned to the same mangled name, so the
    # second overwrote the first; ModelTesterMixin reads these two attributes.
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """Small random input dict for the model under test."""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Deterministic random inputs, seeded for reproducible outputs."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    """Slow integration tests against the Kandinsky 2.1 prior checkpoint."""

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        """Deterministic random inputs sized for the real checkpoint."""
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # Release GPU memory between tests; the checkpoint is large.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input_dict)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 460 | 1 |
'''simple docstring'''
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return ``min_val`` when ``option`` is True, otherwise ``max_val``.

    Raises:
        AssertionError: If any argument has the wrong type.
        ValueError: If ``min_val`` is greater than ``max_val``.
    """
    # Fix: the original signature declared three parameters with the same
    # mangled name, which is a SyntaxError in Python.
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer average of two numbers (truncated toward zero)."""
    return int((number_1 + number_2) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for ``to_guess`` strictly inside ``(lower, higher)``,
    printing the sequence of guesses.

    Raises:
        AssertionError: If any argument is not an int.
        ValueError: If the bounds are inverted or ``to_guess`` is out of range.
    """
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        """Tell whether a candidate overshoots, undershoots, or hits the target."""
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            # Guess was below the target: raise the lower bound.
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
def main() -> None:
    """Read bounds and the target from stdin and run the guessing search."""
    # Fix: the three inputs were all bound to the same mangled name, and the
    # __main__ guard called `main()` while the def carried a mangled name.
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
| 676 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """
    Output class for the DeepFloyd IF pipelines.

    Args:
        images: Generated images as PIL images or a numpy array.
        nsfw_detected: Per-image NSFW flags, if the safety checker ran
            (presumably None otherwise — confirm against the pipelines).
        watermark_detected: Per-image watermark flags, if the checker ran.
    """

    # Fix: the base class was an undefined mangled name (BaseOutput is
    # imported above) and all three fields shared one mangled name.
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
# Import the torch/transformers-backed IF pipelines only when both backends are
# installed; otherwise fall back to the auto-generated dummy placeholders.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_imgaimg import IFImgaImgPipeline
    from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 531 | 0 |
def equation(x: float) -> float:
    """The function whose root we search: f(x) = 10 - x^2."""
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    """Approximate a root of `equation` inside [a, b] by bisection.

    Stops when the bracketing interval is narrower than 0.01.

    Raises:
        ValueError: If ``equation(a)`` and ``equation(b)`` do not bracket a root.
    """
    # Bolzano theorem: a sign change over [a, b] guarantees a root inside.
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
# Demo: run the module doctests, then print approximate roots of f(x) = 10 - x*x.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(bisection(-2, 5))
    print(bisection(0, 6))
| 218 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
# Module-level logger for this pipeline file.
# NOTE(review): mangled identifier — transformers convention names this `logger`;
# nothing visible here reads it, so renaming needs a wider check.
__UpperCAmelCase = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline: given an image and a question,
    returns the top scoring answers from a VQA model.

    Fixes over the previous revision: the decorator argument and base class
    were undefined mangled names, and all four pipeline hooks shared one
    name, which broke `Pipeline`'s preprocess/_forward/postprocess dispatch.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Restrict to models registered for visual question answering.
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        """Split user kwargs into preprocess/forward/postprocess parameter dicts."""
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        """Answer a question about an image.

        Accepts either (image, question) or a dict / list of dicts with
        ``"image"`` and ``"question"`` keys as the first argument.
        """
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Batched or pre-assembled input: pass through unchanged.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        """Tokenize the question and extract image features."""
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        """Return the ``top_k`` answers as ``{"score", "answer"}`` dicts."""
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 218 | 1 |
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """
    Shortest-remaining-time-first (preemptive SJF) scheduling.

    Args:
        arrival_time: per-process arrival times.
        burst_time: per-process CPU burst lengths.
        no_of_processes: number of processes.

    Returns:
        List of per-process waiting times.
    """
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999  # sentinel "infinity" for the shortest remaining time
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        # Pick the arrived process with the smallest remaining time.
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            # CPU is idle: nothing has arrived yet.
            increment_time += 1
            continue

        # Run the selected process for one time unit.
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time = finish - arrival - burst (clamped at 0)
            turnaround = finish_time - arrival_time[short]
            waiting_time[short] = turnaround - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    """Return per-process turnaround times: burst time + waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    """Print the average waiting time and average turnaround time."""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(F"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print('Average turn around time =', total_turn_around_time / no_of_processes)
if __name__ == "__main__":
    # Interactive driver: read process data, run SRTF scheduling, and show a table.
    print('Enter how many process you want to analyze')
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process:--' + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            'Process',
            'BurstTime',
            'ArrivalTime',
            'WaitingTime',
            'TurnAroundTime',
        ],
    )

    # Printing the dataFrame
    pd.set_option('display.max_rows', fcfs.shape[0] + 1)
    print(fcfs)
| 41 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """
    Count ordered sequences (permutations with repetition) of elements of
    `array` that sum to `target`, via plain exponential recursion.

    `n` (len(array)) is unused here but kept so the signature matches the
    other implementations in this file and the call at module bottom.
    """

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1  # one way: the empty combination
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count as `combination_sum_iv`, with top-down memoization in `dp_array`."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:  # already computed
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer  # memoize (the original dropped this write)
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up DP: dp_array[i] = number of ordered combinations summing to i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # the empty combination sums to 0
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: count ordered combinations from [1, 2, 5] summing to 5 (expected 9).
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Maps checkpoint names to their hosted config files.
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCamelCase(PretrainedConfig):
    """Configuration for a ViViT (video vision transformer) model."""

    # NOTE(review): attribute name reconstructed; PretrainedConfig reads `model_type`.
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],  # mirrors upstream default; shared mutable, not mutated here
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias

        # Forward remaining kwargs to PretrainedConfig (pad/bos/eos ids, etc.).
        super().__init__(**kwargs)
| 702 |
"""simple docstring"""
import os
def solution():
    """
    Project Euler 11: greatest product of four adjacent numbers (right, down,
    or either diagonal) in the 20x20 grid stored in grid.txt next to this file.
    """
    # abspath makes the lookup work even when the module is loaded via a bare filename.
    with open(os.path.join(os.path.dirname(os.path.abspath(__file__)), "grid.txt")) as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1 (down-right)
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2 (down-left)
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp

    return maximum
if __name__ == "__main__":
    # Print the largest 4-in-a-row product found in grid.txt.
    print(solution())
| 310 | 0 |
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Mel-spectrogram chunk length (frames) produced per segment by the pipeline.
TARGET_FEATURE_LENGTH = 256
class UpperCAmelCase_(DiffusionPipeline):
    """
    Spectrogram-diffusion pipeline: encode note tokens and continuous context,
    denoise one mel-spectrogram chunk at a time with DDPM, then (optionally)
    vocode the full spectrogram with MelGAN.
    """

    # NOTE(review): attribute name reconstructed; DiffusionPipeline reads `_optional_components`.
    _optional_components = ["melgan"]

    def __init__(self, notes_encoder, continuous_encoder, decoder, scheduler, melgan):
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly map features from [min_value, max_value] into `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Inverse of `scale_features`: map from `input_range` back to feature range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Run both encoders; token id 0 is treated as padding."""
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Predict denoising output at diffusion time `noise_time`."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens,
        generator=None,
        num_inference_steps=100,
        return_dict=True,
        output_type="numpy",
        callback=None,
        callback_steps=1,
    ):
        """
        Generate audio from a list of note-token segments.

        Returns `AudioPipelineOutput` (or a tuple when `return_dict=False`);
        raises ValueError on invalid `callback_steps` or unusable "numpy" output.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
                f' {type(callback_steps)}.'
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
| 5 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Names exported by each submodule; filled in conditionally on backend availability.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Names exported by each submodule; the torch models are added only when available.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
# All paths are relative to the repository root (see the run instructions above).
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_DOCS = "docs/source/en"
REPO_PATH = "."
def _find_text_in_file(filename, start_prompt, end_prompt):
    """
    Return (text, start_index, end_index, lines) for the non-empty region of
    `filename` strictly between the line starting with `start_prompt` and the
    line starting with `end_prompt`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Trim blank lines at both edges of the region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase identifier into its words (acronym runs kept together)."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
def _center_text(text, width):
    """Center `text` in a field of `width` characters (emoji count as width 2)."""
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent
def get_model_table_from_auto_modules():
    """Build the markdown support table (tokenizers / PT / TF / Flax) for index.md."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(item, w) for item, w in zip(line, widths)]) + "|\n"
    return table
def check_model_table(overwrite=False):
    """Compare the table in index.md to the generated one; rewrite it or fail."""
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this."
            )
if __name__ == "__main__":
    # CLI entry: `--fix_and_overwrite` rewrites index.md instead of failing.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_table(args.fix_and_overwrite)
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector))
def a_(vector: np.ndarray) -> np.ndarray:
    """
    Sigmoid linear unit (swish): x * sigmoid(x), computed inline as
    x / (1 + exp(-x)) so it does not depend on a sibling definition.
    """
    return vector / (1 + np.exp(-vector))
if __name__ == "__main__":
    # Run any doctests embedded in this module.
    import doctest

    doctest.testmod()
| 676 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase(DiffusionPipeline):
    """Unconditional image generation with DDIM sampling."""

    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        generator=None,
        eta=0.0,
        num_inference_steps=50,
        use_clipped_model_output=None,
        output_type="pil",
        return_dict=True,
    ):
        """Sample `batch_size` images; returns ImagePipelineOutput or a 1-tuple."""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 676 | 1 |
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config) pairs that are mirrored on the HF GCP bucket.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build `parameterized.named_parameters` entries for the datasets above."""
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class __magic_name__(TestCase):
    """For each (dataset, config) pair, check dataset_info is reachable on HF GCP."""

    # Filled in per test case by `parameterized.named_parameters`.
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            # NOTE(review): `with_hash` argument reconstructed as False — confirm.
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcp(tmp_path_factory):
    """Download a prepared wikipedia config from HF GCP and load it as a Dataset."""
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    """Stream a wikipedia config from HF GCP and check the iterable dataset shape."""
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 720 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    """Fetch the demo 'merlion' image used for conversion sanity checks."""
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    """Return (old_name, new_name) pairs mapping LAVIS weights to HF BLIP-2 names."""
    rename_keys = []
    # fmt: off

    # vision encoder
    rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"))
    rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding"))
    rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight"))
    rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias"))
    rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight"))
    rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias"))

    for i in range(config.vision_config.num_hidden_layers):
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",))
        rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight"))
        rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias"))

    # QFormer
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight"))
    rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias"))

    # fmt: on
    return rename_keys
def rename_key(dct, old, new):
    """Move the value stored under key ``old`` in ``dct`` to key ``new`` (in place).

    Args:
        dct: mutable mapping (typically a model state dict).
        old: existing key to remove.
        new: key under which the popped value is re-inserted.

    Raises:
        KeyError: if ``old`` is not present in ``dct``.
    """
    # Pop first so the old key disappears even when old == new.
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    """Rebuild the fused qkv bias for every vision-encoder layer (in place).

    The original BLIP-2 checkpoint stores separate ``q_bias`` and ``v_bias``
    tensors per attention block (the key bias is implicitly zero). The HF
    implementation expects a single fused bias of shape ``(3 * dim,)`` laid out
    as ``[q | zeros | v]``, so we concatenate a zero block in the key position.

    Args:
        state_dict: checkpoint state dict; ``q_bias``/``v_bias`` entries are
            popped and replaced by one fused ``qkv.bias`` entry per layer.
        config: model config; only ``config.vision_config.num_hidden_layers``
            is read.
    """
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict: k-bias is zero in the original model
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id=None):
    """Build the HF BLIP-2 config matching an original checkpoint name.

    Args:
        model_name: original checkpoint identifier, e.g. ``"blip2-opt-2.7b"``
            or ``"blip2-flan-t5-xl-coco"``; decides text backbone and image size.
        eos_token_id: EOS token id to force on OPT text configs (the pretrained
            OPT configs don't set it correctly for generation).

    Returns:
        ``(config, image_size)`` — the assembled ``BlipaConfig`` and the input
        resolution (364 for COCO-finetuned checkpoints, 224 otherwise).

    Raises:
        ValueError: if ``model_name`` matches no known text backbone.
    """
    image_size = 3_6_4 if "coco" in model_name else 2_2_4
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    else:
        raise ValueError(f"Model name {model_name} not supported")
    config = BlipaConfig(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original LAVIS BLIP-2 checkpoint to the HF format and verify it.

    Loads the original model, remaps its state dict onto the HF architecture,
    checks logits/pixel values against the original implementation, runs a
    sample generation, and optionally saves / pushes the converted model.

    Args:
        model_name: one of the supported ``blip2-*`` checkpoint names.
        pytorch_dump_folder_path: if given, save model + processor there.
        push_to_hub: if True, push model + processor to the Hub.
    """
    # OPT checkpoints share the OPT tokenizer; flan-T5 checkpoints use T5's.
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)
    hf_model = BlipaForConditionalGeneration(config).eval()

    # Map HF model names to (LAVIS registry name, model type) pairs.
    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }
    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    # NOTE(review): image_mean/image_std were mangled in the source; the upstream
    # conversion script uses the OpenAI CLIP normalization constants — confirm
    # they are imported at the top of this file.
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            # T5 is trained with teacher forcing; pad tokens are ignored via -100.
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -1_0_0)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.58_50, -4.44_40, -8.99_22], [-47.43_22, -5.91_43, -1.73_40]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.01_09, -9.89_67, -12.62_80], [-68.65_78, -12.71_91, -10.50_65]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)
    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=3_0,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    choices = [
        "blip2-opt-2.7b",
        "blip2-opt-6.7b",
        "blip2-opt-2.7b-coco",
        "blip2-opt-6.7b-coco",
        "blip2-flan-t5-xl",
        "blip2-flan-t5-xl-coco",
        "blip2-flan-t5-xxl",
    ]
    parser.add_argument(
        "--model_name",
        default="blip2-opt-2.7b",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    args = parser.parse_args()
    convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 331 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''audio_values''', '''audio_mask''']
def __init__( self , lowercase_=2_0_4_8 , lowercase_=1 , lowercase_=[1_6, 1_6] , lowercase_=1_2_8 , lowercase_=4_4_1_0_0 , lowercase_=8_6 , lowercase_=2_0_4_8 , lowercase_=0.0 , **lowercase_ , ) -> Optional[int]:
super().__init__(
feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , **lowercase_ , )
__snake_case = spectrogram_length
__snake_case = num_channels
__snake_case = patch_size
__snake_case = feature_size // self.patch_size[1]
__snake_case = n_fft
__snake_case = sampling_rate // hop_length_to_sampling_rate
__snake_case = sampling_rate
__snake_case = padding_value
__snake_case = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase_ , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=lowercase_ , norm='slaney' , mel_scale='slaney' , ).T
def _a ( self , lowercase_) -> np.ndarray:
__snake_case = spectrogram(
lowercase_ , window_function(self.n_fft , 'hann') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__snake_case = log_spec[:, :-1]
__snake_case = log_spec - 20.0
__snake_case = np.clip(log_spec / 40.0 , -2.0 , 0.0) + 1.0
return log_spec
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = True , lowercase_ = None , lowercase_ = False , lowercase_ = False , **lowercase_ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
F" with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
__snake_case = isinstance(lowercase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F"Only mono-channel audio is supported for input to {self}")
__snake_case = is_batched_numpy or (
isinstance(lowercase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
__snake_case = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(lowercase_ , np.ndarray):
__snake_case = np.asarray(lowercase_ , dtype=np.floataa)
elif isinstance(lowercase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
__snake_case = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
__snake_case = [np.asarray([raw_speech]).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__snake_case = [
self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , lowercase_):
__snake_case = [np.asarray(lowercase_ , dtype=np.floataa) for feature in audio_features]
# Create audio attention mask
__snake_case = max(
[ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]) # The maximum number of audio patches in a batch
if return_attention_mask:
__snake_case = [
(ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
for feature in audio_features
]
__snake_case = np.array(lowercase_).astype(np.floataa)
# convert into correct format for padding
__snake_case = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__snake_case = np.ones([len(lowercase_), 1, max_time_len, self.feature_size]).astype(np.floataa)
__snake_case = padded_audio_features * self.padding_value
for i in range(len(lowercase_)):
__snake_case = audio_features[i]
__snake_case = feature
# return as BatchFeature
if return_attention_mask:
__snake_case = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__snake_case = {'audio_values': padded_audio_features}
__snake_case = BatchFeature(data=lowercase_ , tensor_type=lowercase_)
return encoded_inputs
| 313 |
from typing import Any
class __lowercase :
def __init__( self , lowercase_) -> str:
__snake_case = data
__snake_case = None
def __repr__( self) -> str:
return F"Node({self.data})"
class __lowercase :
def __init__( self) -> Dict:
__snake_case = None
def __iter__( self) -> Any:
__snake_case = self.head
while node:
yield node.data
__snake_case = node.next
def __len__( self) -> int:
return sum(1 for _ in self)
def __repr__( self) -> str:
return "->".join([str(lowercase_) for item in self])
def __getitem__( self , lowercase_) -> Any:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
for i, node in enumerate(self):
if i == index:
return node
return None
def __setitem__( self , lowercase_ , lowercase_) -> None:
if not 0 <= index < len(self):
raise ValueError('list index out of range.')
__snake_case = self.head
for _ in range(lowercase_):
__snake_case = current.next
__snake_case = data
def _a ( self , lowercase_) -> None:
self.insert_nth(len(self) , lowercase_)
def _a ( self , lowercase_) -> None:
self.insert_nth(0 , lowercase_)
def _a ( self , lowercase_ , lowercase_) -> None:
if not 0 <= index <= len(self):
raise IndexError('list index out of range')
__snake_case = Node(lowercase_)
if self.head is None:
__snake_case = new_node
elif index == 0:
__snake_case = self.head # link new_node to head
__snake_case = new_node
else:
__snake_case = self.head
for _ in range(index - 1):
__snake_case = temp.next
__snake_case = temp.next
__snake_case = new_node
def _a ( self) -> None: # print every node data
print(self)
def _a ( self) -> Any:
return self.delete_nth(0)
def _a ( self) -> Any: # delete from tail
return self.delete_nth(len(self) - 1)
def _a ( self , lowercase_ = 0) -> Any:
if not 0 <= index <= len(self) - 1: # test if index is valid
raise IndexError('List index out of range.')
__snake_case = self.head # default first node
if index == 0:
__snake_case = self.head.next
else:
__snake_case = self.head
for _ in range(index - 1):
__snake_case = temp.next
__snake_case = temp.next
__snake_case = temp.next.next
return delete_node.data
def _a ( self) -> bool:
return self.head is None
def _a ( self) -> None:
__snake_case = None
__snake_case = self.head
while current:
# Store the current node's next node.
__snake_case = current.next
# Make the current node's next point backwards
__snake_case = prev
# Make the previous node be the current node
__snake_case = current
# Make the current node the next node (to progress iteration)
__snake_case = next_node
# Return prev in order to put the head at the end
__snake_case = prev
def A ( ) -> None:
'''simple docstring'''
__snake_case = LinkedList()
assert linked_list.is_empty() is True
assert str(snake_case__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(snake_case__ ) == i
linked_list.insert_nth(snake_case__ , i + 1 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(snake_case__ ) == 9
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
__snake_case = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(snake_case__ ) == "->".join(str(snake_case__ ) for i in range(-8 , 1 ) )
def A ( ) -> None:
'''simple docstring'''
__snake_case = [
-9,
100,
Node(7734_5112 ),
'dlrow olleH',
7,
5555,
0,
-192.55_555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
__snake_case = LinkedList()
for i in test_input:
linked_list.insert_tail(snake_case__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(snake_case__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
__snake_case = linked_list.delete_head()
assert result == -9
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
__snake_case = linked_list.delete_tail()
assert result == 12.2
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
__snake_case = linked_list.delete_nth(10 )
assert result is None
assert (
str(snake_case__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(snake_case__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(snake_case__ )
assert (
str(snake_case__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(snake_case__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def A ( ) -> str:
'''simple docstring'''
from doctest import testmod
testmod()
__snake_case = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(snake_case__ )
print('\nReading/changing Node data using indexing:' )
print(f"Element at Position 1: {linked_list[1]}" )
__snake_case = input('Enter New Value: ' ).strip()
print('New list:' )
print(snake_case__ )
print(f"length of linked_list is : {len(snake_case__ )}" )
if __name__ == "__main__":
main()
| 313 | 1 |
"""simple docstring"""
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
lowerCAmelCase__ = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
lowerCAmelCase__ = [ord(letter) for letter in string.ascii_lowercase]
lowerCAmelCase__ = {ord(char) for char in VALID_CHARS}
lowerCAmelCase__ = ["the", "be", "to", "of", "and", "in", "that", "have"]
def _lowerCamelCase ( __a, __a ):
SCREAMING_SNAKE_CASE_ = ""
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
for keychar, cipherchar in zip(cycle(__a ), __a ):
SCREAMING_SNAKE_CASE_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(__a )
return decoded
def _lowerCamelCase ( __a ):
SCREAMING_SNAKE_CASE_ = []
for key in product(__a, repeat=3 ):
SCREAMING_SNAKE_CASE_ = try_key(__a, __a )
if encoded is not None:
possibles.append(__a )
return possibles
def _lowerCamelCase ( __a, __a ):
return [possible for possible in possibles if common_word in possible.lower()]
def _lowerCamelCase ( __a = "p059_cipher.txt" ):
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = 42
SCREAMING_SNAKE_CASE_ = Path(__a ).parent.joinpath(__a ).read_text(encoding='''utf-8''' )
SCREAMING_SNAKE_CASE_ = [int(__a ) for number in data.strip().split(''',''' )]
SCREAMING_SNAKE_CASE_ = filter_valid_chars(__a )
for common_word in COMMON_WORDS:
SCREAMING_SNAKE_CASE_ = filter_common_word(__a, __a )
if len(__a ) == 1:
break
SCREAMING_SNAKE_CASE_ = possibles[0]
return sum(ord(__a ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''') | 628 |
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowerCAmelCase__ = False
class snake_case ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt='''first prompt''' , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''cyberpunk 2077'''
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.dual_guided(
prompt=SCREAMING_SNAKE_CASE_ , image=SCREAMING_SNAKE_CASE_ , text_to_image_strength=0.75 , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.14_48, 0.16_19, 0.17_41, 0.10_86, 0.11_47, 0.11_28, 0.11_99, 0.11_65, 0.10_01] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = '''A painting of a squirrel eating a burger '''
SCREAMING_SNAKE_CASE_ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipe.text_to_image(
prompt=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.33_67, 0.31_69, 0.26_56, 0.38_70, 0.47_90, 0.37_96, 0.40_09, 0.48_78, 0.47_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
SCREAMING_SNAKE_CASE_ = pipe.image_variation(SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , output_type='''numpy''' ).images
SCREAMING_SNAKE_CASE_ = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE_ = np.array([0.30_76, 0.31_23, 0.32_84, 0.37_82, 0.37_70, 0.38_94, 0.42_97, 0.43_31, 0.44_56] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 | 628 | 1 |
"""simple docstring"""
from statistics import mean, stdev
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 3 ) -> list:
"""simple docstring"""
__snake_case = min(SCREAMING_SNAKE_CASE )
__snake_case = max(SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , SCREAMING_SNAKE_CASE ) for x in data]
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 3 ) -> list:
"""simple docstring"""
__snake_case = mean(SCREAMING_SNAKE_CASE )
__snake_case = stdev(SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , SCREAMING_SNAKE_CASE ) for x in data]
| 163 |
"""simple docstring"""
from math import factorial, radians
def __UpperCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 18 , SCREAMING_SNAKE_CASE = 10 ) -> float:
"""simple docstring"""
__snake_case = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__snake_case = radians(SCREAMING_SNAKE_CASE )
__snake_case = angle_in_radians
__snake_case = 3
__snake_case = -1
for _ in range(SCREAMING_SNAKE_CASE ):
result += (b * (angle_in_radians**a)) / factorial(SCREAMING_SNAKE_CASE )
__snake_case = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 163 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = [tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class __a( _a , _a , _a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = StableDiffusionLatentUpscalePipeline
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
lowerCAmelCase = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
lowerCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase = frozenset([] )
lowerCAmelCase = True
@property
def a__ ( self ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Union[str, Any] = (16, 16)
UpperCAmelCase_ : List[str] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_SCREAMING_SNAKE_CASE )
return image
def a__ ( self ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = UNetaDConditionModel(
act_fn='''gelu''' ,attention_head_dim=8 ,norm_num_groups=_SCREAMING_SNAKE_CASE ,block_out_channels=[32, 32, 64, 64] ,time_cond_proj_dim=160 ,conv_in_kernel=1 ,conv_out_kernel=1 ,cross_attention_dim=32 ,down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) ,in_channels=8 ,mid_block_type=_SCREAMING_SNAKE_CASE ,only_cross_attention=_SCREAMING_SNAKE_CASE ,out_channels=5 ,resnet_time_scale_shift='''scale_shift''' ,time_embedding_type='''fourier''' ,timestep_post_act='''gelu''' ,up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') ,)
UpperCAmelCase_ : List[Any] = AutoencoderKL(
block_out_channels=[32, 32, 64, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,)
UpperCAmelCase_ : Union[str, Any] = EulerDiscreteScheduler(prediction_type='''sample''' )
UpperCAmelCase_ : str = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,hidden_act='''quick_gelu''' ,projection_dim=512 ,)
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : List[Any] = {
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=0 ) -> List[str]:
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
UpperCAmelCase_ : Tuple = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def a__ ( self ) -> int:
UpperCAmelCase_ : Dict = '''cpu'''
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : Tuple = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = pipe(**_SCREAMING_SNAKE_CASE ).images
UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 256, 256, 3) )
UpperCAmelCase_ : Optional[Any] = np.array(
[0.47_22_24_12, 0.41_92_16_33, 0.44_71_74_34, 0.46_87_41_92, 0.42_58_82_58, 0.46_15_07_26, 0.4_67_75_34, 0.45_58_38_32, 0.48_57_90_55] )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE ,1e-3 )
# Tolerance-override delegations to the shared test mixin.
# NOTE(review): all of these were named `a__` in the obfuscated original and
# therefore shadowed each other; names restored from the `super().test_*`
# calls each one makes.
def test_attention_slicing_forward_pass(self):
    super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

def test_cpu_offload_forward_pass(self):
    super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

def test_dict_tuple_outputs_equivalent(self):
    super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

def test_inference_batch_single_identical(self):
    super().test_inference_batch_single_identical(expected_max_diff=7e-3)

def test_pt_np_pil_outputs_equivalent(self):
    super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

def test_save_load_local(self):
    super().test_save_load_local(expected_max_difference=3e-3)

def test_save_load_optional_components(self):
    super().test_save_load_optional_components(expected_max_difference=3e-3)
def a__(self) -> Dict:
    """Run the pipeline once per Karras scheduler and check all outputs share
    one shape.

    NOTE(review): local names reconstructed from their read sites (the
    obfuscated original bound every local to one throwaway name). Likely
    originally `test_karras_schedulers_shape` — confirm upstream.
    """
    skip_schedulers = [
        "DDIMScheduler",
        "DDPMScheduler",
        "PNDMScheduler",
        "HeunDiscreteScheduler",
        "EulerAncestralDiscreteScheduler",
        "KDPM2DiscreteScheduler",
        "KDPM2AncestralDiscreteScheduler",
        "DPMSolverSDEScheduler",
    ]
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components)

    # make sure that PNDM does not need warm-up
    pipe.scheduler.register_to_config(skip_prk_steps=True)

    # `torch_device` is the conventional module-level device constant in these
    # test files — the original read an undefined name here; confirm upstream.
    pipe.to(torch_device)
    pipe.set_progress_bar_config(disable=None)
    inputs = self.get_dummy_inputs(torch_device)
    inputs["num_inference_steps"] = 2

    outputs = []
    for scheduler_enum in KarrasDiffusionSchedulers:
        if scheduler_enum.name in skip_schedulers:
            # no sigma schedulers are not supported
            continue
        # The lookup target is the `diffusers` module in the upstream file —
        # the original read an undefined name; confirm upstream.
        scheduler_cls = getattr(diffusers, scheduler_enum.name)
        pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
        output = pipe(**inputs)[0]
        outputs.append(output)

    assert check_same_shape(outputs)
@require_torch_gpu
@slow
class __a(unittest.TestCase):
    """GPU integration tests for the SD x2 latent-upscaler pipeline.

    NOTE(review): local and method names reconstructed from their use sites —
    the obfuscated original rebound one throwaway local, read undefined names,
    and gave every method the same name (silent shadowing). `torch.floataa`
    does not exist; `torch.float16` is the conventional dtype for these
    checkpoints — confirm against upstream before trusting the reconstruction.
    """

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_text2img(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_img2img(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        low_res_img = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=low_res_img,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
import os
import string
import sys
# Bit flag marking arrow-key codes so they don't collide with printable chars.
ARROW_KEY_FLAG = 1 << 8

# Logical key name -> key code. NOTE(review): every module-level binding in
# the obfuscated original went to one throwaway name (`__a`) while later code
# reads KEYMAP / ARROW_KEY_FLAG / WIN_CH_BUFFER / WIN_KEYMAP and the
# "arrow_begin"/"arrow_end" entries; the real names are restored here.
KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

# Arrow-key range markers used when decoding escape sequences.
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    # Buffer of decoded characters pending delivery on Windows, plus the raw
    # two-byte Windows scan codes for the arrow keys.
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

# Digit keys map to their own character codes.
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    """Read one raw keypress from stdin and return it as a one-character string.

    NOTE(review): renamed from the obfuscated `lowerCamelCase__` — the sibling
    function below calls `get_raw_chars()`, which grounds the original name.
    Undefined locals (`_lowercase`, `cha`) restored from their use sites.

    On Windows, translates two-byte scan codes for special keys into the
    buffered KEYMAP encoding; on POSIX, switches the tty to raw mode for a
    single read and always restores the terminal settings.
    """
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                cha = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[cha])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    # Unknown prefix sequence: fall back to its second byte.
                    ch = cha[1]
            else:
                ch = ch.decode(encoding)
        else:
            # Deliver a previously buffered character first.
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            # Always restore the terminal, even if the read raises.
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def lowerCamelCase__():
    """Translate a raw keypress into a character or a KEYMAP key code.

    Plain printable characters, interrupts and newlines are returned as-is;
    ESC-prefixed arrow sequences are folded into their flagged KEYMAP codes;
    anything unrecognized yields ``KEYMAP["undefined"]`` (an int — mixed
    return type is intentional upstream behavior).

    NOTE(review): expects a module-level ``get_raw_chars()`` helper and the
    ``KEYMAP``/``ARROW_KEY_FLAG`` constants; unbound locals in the obfuscated
    original were restored from their read sites.
    """
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                # Re-attach the flag so arrow keys are distinguishable.
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester:
    """Builds tiny TrOCR decoder configs/inputs for the standalone decoder tests.

    NOTE(review): names reconstructed — the obfuscated original duplicated one
    parameter name (a SyntaxError) and bound every attribute to a throwaway
    local. The class name is grounded by its use in the test class's setUp
    below; method names by the calls `self.prepare_config_and_inputs()` and
    `create_and_check_decoder_model_past(...)` in this file.
    """

    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        d_model=16,
        decoder_seq_length=7,
        is_training=True,
        is_decoder=True,
        use_attention_mask=True,
        use_cache=False,
        use_labels=True,
        decoder_start_token_id=2,
        decoder_ffn_dim=32,
        decoder_layers=4,
        decoder_attention_heads=4,
        max_position_embeddings=30,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.hidden_size = d_model
        self.decoder_layers = decoder_layers
        self.num_hidden_layers = decoder_layers
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_attention_heads = decoder_attention_heads
        self.num_attention_heads = decoder_attention_heads
        self.eos_token_id = eos_token_id
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.use_cache = use_cache
        self.max_position_embeddings = max_position_embeddings
        self.scope = None
        self.decoder_key_length = decoder_seq_length
        self.base_model_out_len = 2
        self.decoder_attention_idx = 1

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, attention_mask, lm_labels) for a tiny decoder."""
        input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)

        config = TrOCRConfig(
            vocab_size=self.vocab_size,
            d_model=self.d_model,
            decoder_layers=self.decoder_layers,
            decoder_ffn_dim=self.decoder_ffn_dim,
            decoder_attention_heads=self.decoder_attention_heads,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            use_cache=self.use_cache,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
            max_position_embeddings=self.max_position_embeddings,
        )
        return (config, input_ids, attention_mask, lm_labels)

    def create_and_check_decoder_model_past(self, config, input_ids, attention_mask, lm_labels):
        """Check that cached (past_key_values) decoding matches full-sequence decoding."""
        config.use_cache = True
        model = TrOCRDecoder(config=config).to(torch_device).eval()
        input_ids = input_ids[:2]
        input_ids[input_ids == 0] += 1

        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        past_key_values = outputs["past_key_values"]

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((2, 1), config.vocab_size - 1) + 1

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)

    def prepare_config_and_inputs_for_common(self):
        """Adapter for the common test mixins: (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask, lm_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_torch
class __A(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Standalone TrOCR decoder test case.

    NOTE(review): base-class names restored from the imports at the top of
    this file; attribute and method names reconstructed from the mixin
    conventions and call sites — the obfuscated original bound everything to
    throwaway names and gave every method the same name (shadowing).
    """

    all_model_classes = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (TrOCRForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
    fx_compatible = True
    test_pruning = False

    def setUp(self):
        self.model_tester = TrOCRStandaloneDecoderModelTester(self, is_training=False)
        self.config_tester = ConfigTester(self, config_class=TrOCRConfig)

    # not implemented currently
    def test_inputs_embeds(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_from_base(self):
        pass

    # trocr has no base model
    def test_save_load_fast_init_to_base(self):
        pass

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past(*config_and_inputs)

    def test_retain_grad_hidden_states_attentions(self):
        # decoder cannot keep gradients
        return

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
| 21 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
# Tiny fixture module intended as a Linear -> BatchNorm -> Linear stack for the
# hook tests below.
#
# NOTE(review): the three constructor assignments bind a throwaway local name
# instead of attributes on `self`, so `self.lineara` / `self.batchnorm` (read
# in the second method and throughout the tests) are never set. Additionally,
# `nn.BatchNormad` is not a torch API (presumably `nn.BatchNorm1d`), and both
# Linear layers are bound to the same name. Confirm against the upstream
# source before relying on this fixture.
class __A ( nn.Module ):
    def __init__( self :List[Any] ):
        '''Build the (intended) linear/batchnorm/linear layers.'''
        super().__init__()
        __magic_name__ : Tuple =nn.Linear(3 , 4 )
        __magic_name__ : Union[str, Any] =nn.BatchNormad(4 )
        __magic_name__ : List[str] =nn.Linear(4 , 5 )
    def A__ ( self :Dict , __snake_case :Tuple ):
        '''Forward pass: second linear over batchnorm over first linear.'''
        return self.lineara(self.batchnorm(self.lineara(__snake_case ) ) )
class PreForwardHook(ModelHook):
    """Hook whose pre-forward step increments the module's first positional
    argument by 1.

    NOTE(review): names restored from the call sites in the test class below
    (`PreForwardHook()`) and the imported `ModelHook`; the obfuscated original
    reused one parameter name three times, which is a SyntaxError.
    """

    def pre_forward(self, module, *args, **kwargs):
        # Bump the first positional arg; pass everything else through untouched.
        return (args[0] + 1,) + args[1:], kwargs
class PostForwardHook(ModelHook):
    """Hook whose post-forward step adds 1 to the module output.

    NOTE(review): names restored from the call sites in the test class below
    (`PostForwardHook()`) and the imported `ModelHook`; the obfuscated
    original duplicated a parameter name, which is a SyntaxError.
    """

    def post_forward(self, module, output):
        return output + 1
class __A(unittest.TestCase):
    """Unit tests for accelerate's module hooks.

    NOTE(review): local variable and test-method names were reconstructed from
    their use sites — the obfuscated original bound every local to one
    throwaway name (NameError at runtime) and gave every method the same name
    (silent shadowing). Attribute references such as `model.lineara` /
    `model.batchnorm` are kept exactly as in this file even though the fixture
    class above never sets them — confirm both against the upstream source.
    Assertions and call order are unchanged.
    """

    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()

        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)

        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))

        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])

        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))

    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)

    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)

        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)

    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)

    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.lineara, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.lineara.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.lineara.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))

    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}

        add_hook_to_module(model.lineara, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.lineara, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.lineara)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }

        add_hook_to_module(model.lineara, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.lineara, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.lineara)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.lineara)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.lineara.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.lineara.weight.device, torch.device("cpu"))
| 21 | 1 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def lowerCAmelCase_(__UpperCAmelCase):
    """Return the input unchanged when it is already iterable, otherwise
    duplicate it into a 2-tuple (the usual "to 2-tuple" helper for image
    sizes).

    The original annotation claimed a `str` return, which the code never
    guarantees; annotations dropped rather than mis-stated.
    """
    if isinstance(__UpperCAmelCase, collections.abc.Iterable):
        return __UpperCAmelCase
    return (__UpperCAmelCase, __UpperCAmelCase)
@require_flax
class lowercase__ :
'''simple docstring'''
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
pass
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = np.abs((a - b) ).max()
self.assertLessEqual(__magic_name__, __magic_name__, f"Difference between torch and flax is {diff} (>= {tol})." )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__=None, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : int = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__, __magic_name__ )
UpperCamelCase__ : List[Any] = FlaxVisionTextDualEncoderModel(__magic_name__ )
UpperCamelCase__ : Optional[Any] = model(input_ids=__magic_name__, pixel_values=__magic_name__, attention_mask=__magic_name__ )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], config.projection_dim) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__=None, **__magic_name__ ) -> int:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Union[str, Any] = self.get_vision_text_model(__magic_name__, __magic_name__ )
UpperCamelCase__ : Optional[Any] = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCamelCase__ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
UpperCamelCase__ : Optional[Any] = model(input_ids=__magic_name__, pixel_values=__magic_name__, attention_mask=__magic_name__ )
self.assertEqual(output['''text_embeds'''].shape, (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['''image_embeds'''].shape, (pixel_values.shape[0], model.config.projection_dim) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__=None, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : List[Any] = self.get_vision_text_model(__magic_name__, __magic_name__ )
UpperCamelCase__ : str = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCamelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
UpperCamelCase__ : Union[str, Any] = model(input_ids=__magic_name__, pixel_values=__magic_name__, attention_mask=__magic_name__ )
UpperCamelCase__ : Dict = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
UpperCamelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
UpperCamelCase__ : Dict = model(input_ids=__magic_name__, pixel_values=__magic_name__, attention_mask=__magic_name__ )
UpperCamelCase__ : str = after_output[0]
UpperCamelCase__ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__, 1E-3 )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__, __magic_name__, __magic_name__=None, **__magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : str = self.get_vision_text_model(__magic_name__, __magic_name__ )
UpperCamelCase__ : Any = {'''vision_model''': vision_model, '''text_model''': text_model}
UpperCamelCase__ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
UpperCamelCase__ : Dict = model(
input_ids=__magic_name__, pixel_values=__magic_name__, attention_mask=__magic_name__, output_attentions=__magic_name__ )
UpperCamelCase__ : Optional[Any] = output.vision_model_output.attentions
self.assertEqual(len(__magic_name__ ), vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCamelCase__ : Union[str, Any] = to_atuple(vision_model.config.image_size )
UpperCamelCase__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
UpperCamelCase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
UpperCamelCase__ : str = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len) )
UpperCamelCase__ : str = output.text_model_output.attentions
self.assertEqual(len(__magic_name__ ), text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> int:
"""simple docstring"""
pt_model.to(__magic_name__ )
pt_model.eval()
# prepare inputs
UpperCamelCase__ : Tuple = inputs_dict
UpperCamelCase__ : str = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
UpperCamelCase__ : Tuple = pt_model(**__magic_name__ ).to_tuple()
UpperCamelCase__ : List[Any] = fx_model(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ), len(__magic_name__ ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output in zip(fx_outputs[:4], pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__, pt_output.numpy(), 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__magic_name__ )
UpperCamelCase__ : Tuple = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__, from_pt=__magic_name__ )
UpperCamelCase__ : Any = fx_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ), len(__magic_name__ ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__, pt_output.numpy(), 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__magic_name__ )
UpperCamelCase__ : List[Any] = VisionTextDualEncoderModel.from_pretrained(__magic_name__, from_flax=__magic_name__ )
pt_model_loaded.to(__magic_name__ )
pt_model_loaded.eval()
with torch.no_grad():
UpperCamelCase__ : Tuple = pt_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ), len(__magic_name__ ), '''Output lengths differ between Flax and PyTorch''' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4] ):
self.assert_almost_equals(__magic_name__, pt_output_loaded.numpy(), 4E-2 )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__, __magic_name__ )
UpperCamelCase__ : List[str] = VisionTextDualEncoderModel(__magic_name__ )
UpperCamelCase__ : List[Any] = FlaxVisionTextDualEncoderModel(__magic_name__ )
UpperCamelCase__ : str = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), __magic_name__ )
UpperCamelCase__ : Optional[int] = fx_state
self.check_pt_flax_equivalence(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__, __magic_name__ )
UpperCamelCase__ : Optional[int] = VisionTextDualEncoderModel(__magic_name__ )
UpperCamelCase__ : Optional[int] = FlaxVisionTextDualEncoderModel(__magic_name__ )
UpperCamelCase__ : List[str] = load_flax_weights_in_pytorch_model(__magic_name__, fx_model.params )
self.check_pt_flax_equivalence(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__magic_name__ )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__magic_name__ )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_save_load(**__magic_name__ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__magic_name__ )
@is_pt_flax_cross_test
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Tuple = self.prepare_config_and_inputs()
UpperCamelCase__ : List[str] = config_inputs_dict.pop('''vision_config''' )
UpperCamelCase__ : Dict = config_inputs_dict.pop('''text_config''' )
UpperCamelCase__ : List[str] = config_inputs_dict
self.check_equivalence_pt_to_flax(__magic_name__, __magic_name__, __magic_name__ )
self.check_equivalence_flax_to_pt(__magic_name__, __magic_name__, __magic_name__ )
@slow
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Dict = self.get_pretrained_model_and_inputs()
UpperCamelCase__ : int = model_a(**__magic_name__ )
UpperCamelCase__ : Any = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__magic_name__ )
UpperCamelCase__ : Optional[Any] = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
UpperCamelCase__ : Union[str, Any] = model_a(**__magic_name__ )
UpperCamelCase__ : Tuple = after_outputs[0]
UpperCamelCase__ : str = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__, 1E-5 )
@require_flax
class FlaxViTBertModelTest(__UpperCAmelCase, unittest.TestCase):
    """Dual-encoder tests with a Flax ViT vision tower and a Flax BERT text tower."""

    def get_pretrained_model_and_inputs(self):
        # Tiny checkpoints keep the slow save/load test fast; weights come from PT.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class FlaxCLIPVisionBertModelTest(__UpperCAmelCase, unittest.TestCase):
    """Dual-encoder tests with a Flax CLIP vision tower and a Flax BERT text tower."""

    def get_pretrained_model_and_inputs(self):
        # Tiny checkpoints keep the slow save/load test fast; weights come from PT.
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip",
            "hf-internal-testing/tiny-bert",
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs

        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
    """Slow integration check against the public clip-italian checkpoint."""

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 717 |
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Map of submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exported when torch is installed.
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_informer import (
            INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            InformerForPrediction,
            InformerModel,
            InformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369 | 0 |
from pathlib import Path

import fire


def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first *n* lines of each file in *src_dir* to a same-named file in *dest_dir*."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))


if __name__ == "__main__":
    fire.Fire(minify)
| 328 |
"""Lazy-import structure for the Transfo-XL model family (PyTorch and TensorFlow variants)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Map of submodule name -> public names it exports; consumed by _LazyModule below.
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch modeling code is only exported when torch is installed.
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow modeling code is only exported when TF is installed.
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies load on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 195 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that converts raw mono audio into Whisper-style log-mel `input_features`."""

    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        # Fixed-size window the model consumes: chunk_length seconds of audio.
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform):
        """Compute the log-mel spectrogram of `waveform`, clipped to an 8-dB dynamic range and rescaled."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalize every array in `input_values` to zero mean / unit variance over its un-padded length."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []

            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    # keep the padded tail at the padding value
                    normed_slice[length:] = padding_value

                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]

        return normed_input_values

    def __call__(
        self,
        raw_speech,
        truncation=True,
        pad_to_multiple_of=None,
        return_tensors=None,
        return_attention_mask=None,
        padding="max_length",
        max_length=None,
        sampling_rate=None,
        do_normalize=None,
        **kwargs,
    ):
        """Featurize one or several mono waveforms into padded log-mel `input_features`.

        Raises ValueError if `sampling_rate` disagrees with the extractor's configured rate
        or if multi-channel audio is passed.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self):
        """Serialize the extractor config, dropping the large mel filter bank (it is re-derived on load)."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
| 395 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    """Deprecated alias kept for backward compatibility; delegates entirely to VideoMAEImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 395 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for DiTPipeline built from tiny randomly initialized components."""

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = TransformeraDModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1_000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests for DiTPipeline against the public facebook/DiT-XL checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
| 668 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_snake_case : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : CLIPSegForImageSegmentation , lowerCamelCase : CLIPSegProcessor , lowerCamelCase : AutoencoderKL , lowerCamelCase : CLIPTextModel , lowerCamelCase : CLIPTokenizer , lowerCamelCase : UNetaDConditionModel , lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCamelCase : StableDiffusionSafetyChecker , lowerCamelCase : CLIPImageProcessor , ) -> Tuple:
super().__init__()
if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
__snake_case : Tuple = (
F'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
F' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate("steps_offset!=1" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
__snake_case : Any = dict(scheduler.config )
__snake_case : List[Any] = 1
__snake_case : Tuple = FrozenDict(lowerCamelCase )
if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
__snake_case : List[str] = (
F'The configuration file of this scheduler: {scheduler} has not set the configuration'
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate("skip_prk_steps not set" , "1.0.0" , lowerCamelCase , standard_warn=lowerCamelCase )
__snake_case : List[str] = dict(scheduler.config )
__snake_case : List[str] = True
__snake_case : Any = FrozenDict(lowerCamelCase )
if safety_checker is None:
logger.warning(
F'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
segmentation_model=lowerCamelCase , segmentation_processor=lowerCamelCase , vae=lowerCamelCase , text_encoder=lowerCamelCase , tokenizer=lowerCamelCase , unet=lowerCamelCase , scheduler=lowerCamelCase , safety_checker=lowerCamelCase , feature_extractor=lowerCamelCase , )
def __snake_case ( self : Dict , lowerCamelCase : Optional[Union[str, int]] = "auto" ) -> Tuple:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case : Any = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def __snake_case ( self : List[Any] ) -> Any:
self.enable_attention_slicing(lowerCamelCase )
def __snake_case ( self : Optional[Any] ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__snake_case : Optional[int] = torch.device("cuda" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase , lowerCamelCase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __snake_case ( self : int ) -> Any:
if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str]] , lowerCamelCase : Union[torch.FloatTensor, PIL.Image.Image] , lowerCamelCase : str , lowerCamelCase : int = 512 , lowerCamelCase : int = 512 , lowerCamelCase : int = 50 , lowerCamelCase : float = 7.5 , lowerCamelCase : Optional[Union[str, List[str]]] = None , lowerCamelCase : Optional[int] = 1 , lowerCamelCase : float = 0.0 , lowerCamelCase : Optional[torch.Generator] = None , lowerCamelCase : Optional[torch.FloatTensor] = None , lowerCamelCase : Optional[str] = "pil" , lowerCamelCase : bool = True , lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase : int = 1 , **lowerCamelCase : Dict , ) -> List[str]:
__snake_case : Tuple = self.segmentation_processor(
text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
__snake_case : str = self.segmentation_model(**lowerCamelCase )
__snake_case : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__snake_case : List[Any] = self.numpy_to_pil(lowerCamelCase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__snake_case : Tuple = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , height=lowerCamelCase , width=lowerCamelCase , num_inference_steps=lowerCamelCase , guidance_scale=lowerCamelCase , negative_prompt=lowerCamelCase , num_images_per_prompt=lowerCamelCase , eta=lowerCamelCase , generator=lowerCamelCase , latents=lowerCamelCase , output_type=lowerCamelCase , return_dict=lowerCamelCase , callback=lowerCamelCase , callback_steps=lowerCamelCase , )
| 81 | 0 |
'''simple docstring'''
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPTaConfig
def snake_case_ ( name , val , spaces=0) -> None:
    """Recursively pretty-print the structure of a (possibly nested) state dict.

    Fixes: all three parameters shared the mangled name ``__snake_case`` (a SyntaxError —
    duplicate argument names) while the body read ``name``/``val``/``spaces``, and the
    recursive call targeted the undefined name ``recursive_print``.

    Args:
        name: key of the current entry, or ``None`` at the root.
        val: a nested dict, a ``torch.Tensor`` (its shape is printed), or any other value.
        spaces: current indentation depth (advanced by 2 per nesting level).
    """
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0 , spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)
    # Print and recurse (if needed).
    if isinstance(val , dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            snake_case_(k , val[k] , spaces + 2)
    elif isinstance(val , torch.Tensor):
        print(msg , ''':''' , val.size())
    else:
        print(msg , ''':''' , val)
def snake_case_ ( param , checkpoint_version , num_splits , num_heads , hidden_size) -> torch.Tensor:
    """Reorder a fused Megatron-LM query/key/value weight or bias for transformers GPT-2.

    Megatron checkpoint layouts differ by version: v1.0 stores
    ``[num_heads * hidden_size * num_splits, :]``; v2.0+ stores
    ``[num_heads * num_splits * hidden_size, :]``. Both are permuted to the layout
    transformers expects, then reshaped back to the original shape.

    Fixes: the five parameters shared one mangled name (a SyntaxError — duplicate argument
    names) and every local was bound to a single mangled identifier, so later reads of
    ``input_shape``/``saved_shape`` were unresolved. Parameter order restored from the
    call site ``fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)``.
    """
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0 , 2)
        param = param.transpose(1 , 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0 , 1).contiguous()
    param = param.view(*input_shape)
    return param
def snake_case_ ( __snake_case : int , __snake_case : Union[str, Any] , __snake_case : Union[str, Any]) -> Optional[int]:
    """Convert a Megatron-LM GPT-2 checkpoint state dict into a transformers GPT-2 state dict.

    NOTE(review): this body has been machine-mangled — the three parameters share one name
    (duplicate argument names are a SyntaxError) and every local is bound to the single name
    ``lowerCAmelCase_`` while later statements read the original identifiers
    (``input_state_dict``, ``config``, ``ds_args``, ``transformer``, ...), which are therefore
    unresolved. The comments describe the intended flow only; identifiers must be restored
    before this can run. TODO: reconstruct from the upstream conversion script.
    """
    lowerCAmelCase_ = {}
    # old versions did not store training args
    lowerCAmelCase_ = input_state_dict.get('''args''' , __UpperCamelCase)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        lowerCAmelCase_ = ds_args.padded_vocab_size
        lowerCAmelCase_ = ds_args.max_position_embeddings
        lowerCAmelCase_ = ds_args.hidden_size
        lowerCAmelCase_ = ds_args.num_layers
        lowerCAmelCase_ = ds_args.num_attention_heads
        lowerCAmelCase_ = ds_args.ffn_hidden_size
        # pprint(config)
    # The number of heads.
    lowerCAmelCase_ = config.n_head
    # The hidden_size per head.
    lowerCAmelCase_ = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        lowerCAmelCase_ = input_state_dict["""checkpoint_version"""]
    else:
        lowerCAmelCase_ = 0.0
    # The model.
    lowerCAmelCase_ = input_state_dict["""model"""]
    # The language model.
    lowerCAmelCase_ = model["""language_model"""]
    # The embeddings.
    lowerCAmelCase_ = lm["""embedding"""]
    # The word embeddings.
    lowerCAmelCase_ = embeddings["""word_embeddings"""]["""weight"""]
    # Truncate the embedding table to vocab_size rows.
    lowerCAmelCase_ = word_embeddings[: config.vocab_size, :]
    lowerCAmelCase_ = word_embeddings
    # The position embeddings.
    lowerCAmelCase_ = embeddings["""position_embeddings"""]["""weight"""]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    lowerCAmelCase_ = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            F'''pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match''')
    # Store the position embeddings.
    lowerCAmelCase_ = pos_embeddings
    # The transformer.
    lowerCAmelCase_ = lm["""transformer"""] if """transformer""" in lm.keys() else lm["""encoder"""]
    # The regex to extract layer names.
    lowerCAmelCase_ = re.compile(R'''layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)''')
    # The simple map of names for "automated" rules.
    lowerCAmelCase_ = {
        """attention.dense""": """.attn.c_proj.""",
        """self_attention.dense""": """.attn.c_proj.""",
        """mlp.dense_h_to_4h""": """.mlp.c_fc.""",
        """mlp.dense_4h_to_h""": """.mlp.c_proj.""",
    }
    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        lowerCAmelCase_ = layer_re.match(__UpperCamelCase)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        lowerCAmelCase_ = int(m.group(1))
        # The name of the operation.
        lowerCAmelCase_ = m.group(2)
        # Is it a weight or a bias?
        lowerCAmelCase_ = m.group(3)
        # The name of the layer.
        lowerCAmelCase_ = F'''transformer.h.{layer_idx}'''
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('''layernorm'''):
            lowerCAmelCase_ = """ln_1""" if op_name.startswith('''input''') else """ln_2"""
            lowerCAmelCase_ = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            lowerCAmelCase_ = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa)).view(
                1 , 1 , __UpperCamelCase , __UpperCamelCase)
            lowerCAmelCase_ = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            lowerCAmelCase_ = torch.tensor(-1E4 , dtype=torch.floataa)
            lowerCAmelCase_ = masked_bias
            lowerCAmelCase_ = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            lowerCAmelCase_ = out_val.transpose(0 , 1).contiguous()
            # Store.
            lowerCAmelCase_ = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            lowerCAmelCase_ = fix_query_key_value_ordering(__UpperCamelCase , __UpperCamelCase , 3 , __UpperCamelCase , __UpperCamelCase)
            # Store. No change of shape.
            lowerCAmelCase_ = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            lowerCAmelCase_ = megatron_to_transformers[op_name]
            lowerCAmelCase_ = val.transpose(0 , 1)
        # Copy the bias.
        elif weight_or_bias == "bias":
            lowerCAmelCase_ = megatron_to_transformers[op_name]
            lowerCAmelCase_ = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    lowerCAmelCase_ = transformer["""final_layernorm.weight"""]
    lowerCAmelCase_ = transformer["""final_layernorm.bias"""]
    # For LM head, transformers' wants the matrix to weight embeddings.
    lowerCAmelCase_ = word_embeddings
    # It should be done!
    return output_state_dict
def snake_case_ ( ) -> List[str]:
    """CLI driver: load a Megatron-LM checkpoint (.zip or .pt), convert it to a
    transformers GPT-2 checkpoint, and save config/tokenizer/weights next to it.

    NOTE(review): machine-mangled — locals are all bound to ``lowerCAmelCase_`` while later
    statements read the original names (``parser``, ``args``, ``config``, ``tokenizer``, ...),
    and the calls to ``convert_megatron_checkpoint``/``recursive_print`` target names not
    defined in this file (those functions were renamed to ``snake_case_``). As written this
    cannot run; identifiers need to be restored.
    """
    lowerCAmelCase_ = argparse.ArgumentParser()
    parser.add_argument('''--print-checkpoint-structure''' , action='''store_true''')
    parser.add_argument(
        '''path_to_checkpoint''' , type=__UpperCamelCase , help='''Path to the checkpoint file (.zip archive or direct .pt file)''' , )
    parser.add_argument(
        '''--config_file''' , default='''''' , type=__UpperCamelCase , help='''An optional config json file describing the pre-trained model.''' , )
    lowerCAmelCase_ = parser.parse_args()
    # Extract the basename.
    lowerCAmelCase_ = os.path.dirname(args.path_to_checkpoint)
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(F'''Extracting PyTorch state dictionary from {args.path_to_checkpoint}''')
    if args.path_to_checkpoint.endswith('''.zip'''):
        with zipfile.ZipFile(args.path_to_checkpoint , '''r''') as checkpoint:
            with checkpoint.open('''release/mp_rank_00/model_optim_rng.pt''') as pytorch_dict:
                lowerCAmelCase_ = torch.load(__UpperCamelCase , map_location='''cpu''')
    else:
        lowerCAmelCase_ = torch.load(args.path_to_checkpoint , map_location='''cpu''')
    lowerCAmelCase_ = input_state_dict.get('''args''' , __UpperCamelCase)
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                lowerCAmelCase_ = """gelu_fast"""
            elif ds_args.openai_gelu:
                lowerCAmelCase_ = """gelu_new"""
            else:
                lowerCAmelCase_ = """gelu"""
        else:
            # in the very early days this used to be "gelu_new"
            lowerCAmelCase_ = """gelu_new"""
        # Spell out all parameters in case the defaults change.
        lowerCAmelCase_ = GPTaConfig(
            vocab_size=50257 , n_positions=1024 , n_embd=1024 , n_layer=24 , n_head=16 , n_inner=4096 , activation_function=__UpperCamelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1E-5 , initializer_range=0.0_2 , summary_type='''cls_index''' , summary_use_proj=__UpperCamelCase , summary_activation=__UpperCamelCase , summary_proj_to_labels=__UpperCamelCase , summary_first_dropout=0.1 , scale_attn_weights=__UpperCamelCase , use_cache=__UpperCamelCase , bos_token_id=50256 , eos_token_id=50256 , )
    else:
        lowerCAmelCase_ = GPTaConfig.from_json_file(args.config_file)
    lowerCAmelCase_ = ["""GPT2LMHeadModel"""]
    # Convert.
    print('''Converting''')
    lowerCAmelCase_ = convert_megatron_checkpoint(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(__UpperCamelCase , __UpperCamelCase)
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        lowerCAmelCase_ = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            lowerCAmelCase_ = """gpt2"""
        elif tokenizer_type == "PretrainedFromHF":
            lowerCAmelCase_ = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(F'''Unrecognized tokenizer_type {tokenizer_type}''')
    else:
        lowerCAmelCase_ = """gpt2"""
    lowerCAmelCase_ = AutoTokenizer.from_pretrained(__UpperCamelCase)
    lowerCAmelCase_ = type(__UpperCamelCase).__name__
    lowerCAmelCase_ = tokenizer_class
    # Store the config to file.
    print('''Saving config''')
    config.save_pretrained(__UpperCamelCase)
    # Save tokenizer based on args
    print(F'''Adding {tokenizer_class} tokenizer files''')
    tokenizer.save_pretrained(__UpperCamelCase)
    # Store the state_dict to file.
    lowerCAmelCase_ = os.path.join(__UpperCamelCase , '''pytorch_model.bin''')
    print(F'''Saving checkpoint to "{output_checkpoint_file}"''')
    torch.save(__UpperCamelCase , __UpperCamelCase)
####################################################################################################
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name in this file — the driver above was
    # renamed to `snake_case_`; running this script as-is raises NameError. TODO confirm entry point.
    main()
####################################################################################################
| 721 | '''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
# bitsandbytes is optional: import it only when the helper reports it as installed,
# so this module can still be imported on machines without CUDA/bnb.
if is_bnb_available():
    import bitsandbytes as bnb
from copy import deepcopy
# Module-level logger for quantization progress and warnings.
A_ : Dict =logging.getLogger(__name__)
def snake_case_ ( __snake_case : torch.nn.Module , __snake_case : BnbQuantizationConfig , __snake_case : Union[str, os.PathLike] = None , __snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None , __snake_case : Optional[List[str]] = None , __snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , ) -> List[str]:
    """Quantize *model* with bitsandbytes (8-bit or 4-bit) and dispatch it across devices.

    NOTE(review): machine-mangled — the parameters share one name (duplicate argument names
    are a SyntaxError) and every local is bound to ``lowerCAmelCase_`` while later statements
    read the original names (``load_in_abit``, ``device_map``, ``modules_to_not_convert``, ...).
    As written it cannot run; the comments below describe the intended flow only.
    """
    lowerCAmelCase_ = bnb_quantization_config.load_in_abit
    lowerCAmelCase_ = bnb_quantization_config.load_in_abit
    if load_in_abit and not is_abit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''')
    if load_in_abit and not is_abit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            '''make sure you have the latest version of `bitsandbytes` installed.''')
    lowerCAmelCase_ = []
    # custom device map
    if isinstance(__snake_case , __snake_case) and len(device_map.keys()) > 1:
        lowerCAmelCase_ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        lowerCAmelCase_ = get_keys_to_not_convert(__snake_case)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_abit:
        bnb_quantization_config.skip_modules.extend(__snake_case)
    lowerCAmelCase_ = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fpaa_modules is None:
        lowerCAmelCase_ = []
    lowerCAmelCase_ = bnb_quantization_config.keep_in_fpaa_modules
    modules_to_not_convert.extend(__snake_case)
    # compatibility with peft
    lowerCAmelCase_ = load_in_abit
    lowerCAmelCase_ = load_in_abit
    lowerCAmelCase_ = get_parameter_device(__snake_case)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            '''It is not recommended to quantize a loaded model. '''
            '''The model should be instantiated under the `init_empty_weights` context manager.''')
        lowerCAmelCase_ = replace_with_bnb_layers(__snake_case , __snake_case , modules_to_not_convert=__snake_case)
        # convert param to the right dtype
        lowerCAmelCase_ = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules):
                param.to(torch.floataa)
                if param.dtype != torch.floataa:
                    lowerCAmelCase_ = name.replace('''.weight''' , '''''').replace('''.bias''' , '''''')
                    lowerCAmelCase_ = getattr(__snake_case , __snake_case , __snake_case)
                    if param is not None:
                        param.to(torch.floataa)
            elif torch.is_floating_point(__snake_case):
                param.to(__snake_case)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''')
        logger.info(
            F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            '''We move the model to cuda.''')
        return model
    elif weights_location is None:
        raise RuntimeError(
            F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
    else:
        with init_empty_weights():
            lowerCAmelCase_ = replace_with_bnb_layers(
                __snake_case , __snake_case , modules_to_not_convert=__snake_case)
        lowerCAmelCase_ = get_quantized_model_device_map(
            __snake_case , __snake_case , __snake_case , max_memory=__snake_case , no_split_module_classes=__snake_case , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            lowerCAmelCase_ = True
        lowerCAmelCase_ = any(x in list(device_map.values()) for x in ['''cpu''', '''disk'''])
        load_checkpoint_in_model(
            __snake_case , __snake_case , __snake_case , dtype=bnb_quantization_config.torch_dtype , offload_folder=__snake_case , offload_state_dict=__snake_case , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
    return dispatch_model(__snake_case , device_map=__snake_case , offload_dir=__snake_case)
def snake_case_ ( __snake_case : int , __snake_case : Optional[Any] , __snake_case : str=None , __snake_case : List[Any]=None , __snake_case : Optional[int]=None) -> Dict:
    """Build (or validate) a device map for a model that is about to be bnb-quantized.

    NOTE(review): machine-mangled — duplicate parameter names (SyntaxError) and locals all
    bound to ``lowerCAmelCase_`` while later statements read the original names
    (``device_map``, ``special_dtypes``, ``max_memory``, ...). Comments describe intent only.
    """
    if device_map is None:
        if torch.cuda.is_available():
            lowerCAmelCase_ = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''')
        logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''')
    if isinstance(__snake_case , __snake_case):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''')
        lowerCAmelCase_ = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            })
        special_dtypes.update(
            {
                name: torch.floataa
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules)
            })
        lowerCAmelCase_ = {}
        lowerCAmelCase_ = special_dtypes
        lowerCAmelCase_ = no_split_module_classes
        lowerCAmelCase_ = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            lowerCAmelCase_ = get_balanced_memory(
                __snake_case , low_zero=(device_map == '''balanced_low_0''') , max_memory=__snake_case , **__snake_case , )
        lowerCAmelCase_ = max_memory
        lowerCAmelCase_ = infer_auto_device_map(__snake_case , **__snake_case)
    if isinstance(__snake_case , __snake_case):
        # check if don't have any quantized module on the cpu
        lowerCAmelCase_ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
        lowerCAmelCase_ = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_abit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''')
                else:
                    logger.info(
                        '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''')
    del device_map_without_some_modules
    return device_map
def snake_case_ ( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None) -> Any:
    """Replace every eligible ``nn.Linear`` in *model* with a bitsandbytes quantized layer.

    Fixes: the four parameters shared one mangled name (a SyntaxError — duplicate argument
    names) and the body read the original identifiers, which were unresolved.

    NOTE(review): this delegates to ``_replace_with_bnb_layers``, which in this mangled copy
    is itself renamed to ``snake_case_`` — confirm the helper binding upstream.

    Args:
        model: the model whose linear layers are converted in place.
        bnb_quantization_config: selects 8-bit vs 4-bit conversion and its options.
        modules_to_not_convert: module names left untouched (defaults to none).
        current_key_name: recursion bookkeeping for the dotted module path.
    """
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name)
    if not has_been_replaced:
        # Nothing was converted — likely an architecture (e.g. gpt2/Conv1D) without nn.Linear.
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''')
    return model
def snake_case_ ( __snake_case : int , __snake_case : int , __snake_case : Optional[int]=None , __snake_case : int=None , ) -> Any:
    """Recursive worker that swaps ``nn.Linear`` children for bnb 8-bit/4-bit layers.

    NOTE(review): machine-mangled — duplicate parameter names (SyntaxError) and locals bound
    to ``lowerCAmelCase_`` while later reads use the original names (``proceed``,
    ``bnb_module``, ``has_been_replaced``, ...). Comments describe intent only.
    """
    lowerCAmelCase_ = False
    for name, module in model.named_children():
        if current_key_name is None:
            lowerCAmelCase_ = []
        current_key_name.append(__snake_case)
        if isinstance(__snake_case , nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            lowerCAmelCase_ = '''.'''.join(__snake_case)
            lowerCAmelCase_ = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    lowerCAmelCase_ = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_abit:
                    lowerCAmelCase_ = bnb.nn.LinearabitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__snake_case , threshold=bnb_quantization_config.llm_inta_threshold , )
                elif bnb_quantization_config.load_in_abit:
                    lowerCAmelCase_ = bnb.nn.Linearabit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''')
                lowerCAmelCase_ = module.weight.data
                if module.bias is not None:
                    lowerCAmelCase_ = module.bias.data
                bnb_module.requires_grad_(__snake_case)
                setattr(__snake_case , __snake_case , __snake_case)
                lowerCAmelCase_ = True
        if len(list(module.children())) > 0:
            lowerCAmelCase_ ,lowerCAmelCase_ = _replace_with_bnb_layers(
                __snake_case , __snake_case , __snake_case , __snake_case)
            lowerCAmelCase_ = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def snake_case_ ( __snake_case : List[str]) -> Any:
    """Compute the module names to exclude from quantization (lm_head, tied weights, ...).

    NOTE(review): machine-mangled — locals bound to ``lowerCAmelCase_`` while later reads use
    the original names (``tied_params``, ``list_modules``, ``filtered_module_names``, ...).
    Comments describe intent only.
    """
    # Create a copy of the model
    with init_empty_weights():
        lowerCAmelCase_ = deepcopy(__snake_case) # this has 0 cost since it is done inside `init_empty_weights` context manager`
    lowerCAmelCase_ = find_tied_parameters(__snake_case)
    # For compatibility with Accelerate < 0.18
    if isinstance(__snake_case , __snake_case):
        lowerCAmelCase_ = sum(list(tied_params.values()) , []) + list(tied_params.keys())
    else:
        lowerCAmelCase_ = sum(__snake_case , [])
    lowerCAmelCase_ = len(__snake_case) > 0
    # Check if it is a base model
    lowerCAmelCase_ = False
    if hasattr(__snake_case , '''base_model_prefix'''):
        lowerCAmelCase_ = not hasattr(__snake_case , model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    lowerCAmelCase_ = list(model.named_children())
    lowerCAmelCase_ = [list_modules[-1][0]]
    # add last module together with tied weights
    lowerCAmelCase_ = set(__snake_case) - set(__snake_case)
    lowerCAmelCase_ = list(set(__snake_case)) + list(__snake_case)
    # remove ".weight" from the keys
    lowerCAmelCase_ = ['''.weight''', '''.bias''']
    lowerCAmelCase_ = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                lowerCAmelCase_ = name.replace(__snake_case , '''''')
        filtered_module_names.append(__snake_case)
    return filtered_module_names
def snake_case_ ( model) -> bool:
    """Return ``True`` if any submodule of *model* is a bitsandbytes 4-bit linear layer.

    Fixes: the parameter was named ``__snake_case`` while the loop iterated over the
    undefined name ``model`` (NameError), and the ``isinstance`` check inspected the model
    object itself instead of the loop variable ``m`` — it could never match a submodule.
    NOTE(review): ``bnb.nn.Linearabit`` mirrors the mangled import above; upstream this is
    ``bnb.nn.Linear4bit`` — confirm.
    """
    for m in model.modules():
        if isinstance(m , bnb.nn.Linearabit):
            return True
    return False
def snake_case_ ( parameter : nn.Module) -> torch.device:
    """Return the device of the first parameter tensor of *parameter*.

    Fixes: the argument was named ``__snake_case`` while the body read the undefined name
    ``parameter`` (NameError on every call); the ``-> str`` annotation was also wrong — the
    function returns a ``torch.device``.
    """
    return next(parameter.parameters()).device
def snake_case_ ( __snake_case : int , __snake_case : Any , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Union[str, Any] , __snake_case : Any , __snake_case : Tuple) -> int:
    """Quantize one parameter on GPU, offload its weight (and 8-bit SCB stats) to disk,
    then park the module tensor on the ``meta`` device.

    NOTE(review): machine-mangled — duplicate parameter names (SyntaxError) and locals bound
    to ``lowerCAmelCase_`` while later reads use ``tensor_name``/``module``/``splits``/...
    Comments describe intent only.
    """
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fpaa_statistics is None:
        set_module_tensor_to_device(__snake_case , __snake_case , 0 , dtype=__snake_case , value=__snake_case)
        lowerCAmelCase_ = param_name
        lowerCAmelCase_ = model
        if "." in tensor_name:
            lowerCAmelCase_ = tensor_name.split('''.''')
            for split in splits[:-1]:
                lowerCAmelCase_ = getattr(__snake_case , __snake_case)
                if new_module is None:
                    raise ValueError(F'''{module} has no attribute {split}.''')
                lowerCAmelCase_ = new_module
            lowerCAmelCase_ = splits[-1]
        # offload weights
        lowerCAmelCase_ = False
        offload_weight(module._parameters[tensor_name] , __snake_case , __snake_case , index=__snake_case)
        if hasattr(module._parameters[tensor_name] , '''SCB'''):
            offload_weight(
                module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''') , __snake_case , index=__snake_case , )
    else:
        offload_weight(__snake_case , __snake_case , __snake_case , index=__snake_case)
        offload_weight(__snake_case , param_name.replace('''weight''' , '''SCB''') , __snake_case , index=__snake_case)
    set_module_tensor_to_device(__snake_case , __snake_case , '''meta''' , dtype=__snake_case , value=torch.empty(*param.size()))
| 606 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCAmelCase : Optional[int] = {
'''configuration_whisper''': ['''WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''WhisperConfig''', '''WhisperOnnxConfig'''],
'''feature_extraction_whisper''': ['''WhisperFeatureExtractor'''],
'''processing_whisper''': ['''WhisperProcessor'''],
'''tokenization_whisper''': ['''WhisperTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Tuple = ['''WhisperTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''WhisperForConditionalGeneration''',
'''WhisperModel''',
'''WhisperPreTrainedModel''',
'''WhisperForAudioClassification''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int = [
'''TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWhisperForConditionalGeneration''',
'''TFWhisperModel''',
'''TFWhisperPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''FlaxWhisperForConditionalGeneration''',
'''FlaxWhisperModel''',
'''FlaxWhisperPreTrainedModel''',
'''FlaxWhisperForAudioClassification''',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 58 | '''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
# Emit informative progress messages from the transformers logger during conversion.
logging.set_verbosity_info()
def lowerCamelCase__ ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """Convert a TensorFlow GPT-2 checkpoint to a PyTorch checkpoint on disk.

    Fixes: the three parameters all shared the mangled name ``A_`` (a SyntaxError —
    duplicate argument names) and the body mixed ``A_`` with the original identifiers,
    leaving ``config``/``model``/``pytorch_weights_dump_path`` etc. unresolved. Parameter
    order restored from the call site
    ``(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)``.

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: optional JSON config path; empty string means default config.
        pytorch_dump_folder_path: output folder for ``pytorch_model.bin`` and config.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path)
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__snake_case : Dict = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
| 660 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
# Map of canonical ALBERT checkpoint names to the URLs of their hosted config files.
a = {
    "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
    "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
    "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
    "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
    "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
    "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
    "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
    "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class UpperCamelCase__ ( PretrainedConfig ):
    """ALBERT model configuration.

    Defaults reproduce an albert-xxlarge-style architecture. Instantiating with no
    arguments therefore yields that architecture; pass keyword overrides to change it.
    """

    # Registry key used by the AutoConfig machinery of `PretrainedConfig`.
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        # Special-token ids are consumed by the base class; remaining kwargs pass through.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class UpperCamelCase__ ( OnnxConfig ):
    """ONNX export configuration for ALBERT models."""

    @property
    def inputs(self):
        """Return the ordered input names mapped to their dynamic-axis descriptions.

        `OnnxConfig` subclasses expose this property so the exporter knows which
        axes of each input tensor are dynamic.
        """
        # Multiple-choice tasks carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
| 707 |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    """Builds tiny OpenLlama configs/inputs and runs shape checks for the test case below.

    Named `OpenLlamaModelTester` because the accompanying test case instantiates it
    under that name in its `setUp`.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a config for one forward pass."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Return a tiny OpenLlamaConfig matching the tester's hyper-parameters."""
        # NOTE(review): `is_decoder`/`use_stable_embedding` values were destroyed by
        # obfuscation in the original; False/True match the upstream OpenLlama tests.
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Forward the base model with and without a mask; check hidden-state shape."""
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        """Run the model as a cross-attending decoder and check hidden-state shape."""
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        """Forward the causal-LM head with labels and check logits shape."""
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        """Verify cached (`past_key_values`) decoding matches the uncached forward pass."""
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixins expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Model / generation / pipeline tests for the tiny OpenLlama models built above."""

    # Attribute names below follow the contracts of the imported test mixins.
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        # Exercise every supported position-embedding variant on the same inputs.
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        # Multi-label targets are float one-hot-style tensors of shape (batch, num_labels).
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
| 650 | 0 |
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k) using the multiplicative formula (exact integer arithmetic)."""
    result = 1  # accumulates the product; division below is always exact
    # Since C(n, k) = C(n, n-k), use the smaller of the two loop bounds.
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Return the nth Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    """Return n! for n >= 0; raise ValueError for negative input."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Return the number of distinct binary trees on `node_count` labelled nodes."""
    # Catalan(n) counts tree shapes; factorial(n) counts node labelings per shape.
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    # Prompt for a node count (empty input falls back to 0, which is rejected below).
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
        f"""binary trees and {catalan_number(node_count)} binary search trees."""
    )
| 403 |
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of a non-negative integer."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    """Project Euler 65: digit sum of the numerator of the `max_n`-th convergent of e.

    The continued fraction of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]; numerators obey
    h(i) = a(i) * h(i-1) + h(i-2) with a(i) = 2*i/3 when i % 3 == 0 and 1 otherwise.
    """
    pre_numerator = 1
    cur_numerator = 2  # numerator of the first convergent (just a0 = 2)
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # Digit sum computed inline so this function has no sibling dependencies.
    return sum(int(digit) for digit in str(cur_numerator))
if __name__ == "__main__":
    # Print the Project Euler 65 answer using the f-string debug (`=`) syntax.
    print(f'''{solution() = }''')
| 455 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class lowercase_:
    """`Translation` feature: one translation per language, fixed language set.

    NOTE(review): a second feature class later in this module reuses the same
    obfuscated class name and shadows this one at module level — confirm intent.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: a struct with one string field per (sorted) language.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature into one string `Value` per language."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class lowercase_:
    """`TranslationVariableLanguages` feature: each example may cover a different
    subset of languages, with possibly several translations per language."""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize the language list: deduplicate, sort, and cache its size.
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        # Arrow storage: parallel lists of language codes and translation strings.
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Encode `{lang: text-or-list-of-texts}` into parallel language/translation tuples."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into sequences of language codes and translation strings."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 612 |
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ (SchedulerCommonTest):
    """Unit tests for the PNDM scheduler (PRK warmup steps + PLMS steps)."""

    # Attribute names follow the `SchedulerCommonTest` contract.
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a baseline scheduler config, with keyword overrides applied."""
        config = {
            "num_train_timesteps": 1_0_0_0,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload the scheduler and verify step outputs are unchanged."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Covered by check_over_configs/check_over_forward for this scheduler.
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        """Like check_over_configs, but residuals are seeded after set_timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a full 10-step denoising loop (PRK warmup then PLMS) and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_a = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_b = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_b.shape)

            output_a = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_b = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_b.shape)

    def test_timesteps(self):
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(1_0)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 1_0]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 1_0], [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 2_7

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        # PLMS must refuse to step before any PRK residual history exists.
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1_318) < 1E-2
        assert abs(result_mean.item() - 0.2_580) < 1E-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3_986) < 1E-2
        assert abs(result_mean.item() - 0.0_878) < 1E-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0_399) < 1E-2
        assert abs(result_mean.item() - 0.2_995) < 1E-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9_482) < 1E-2
        assert abs(result_mean.item() - 0.2_434) < 1E-3
| 612 | 1 |
from __future__ import annotations
import requests
def get_hackernews_story(story_id: str) -> dict:
    """Fetch a single Hacker News item by id from the Firebase API."""
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()
def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Fetch the current Hacker News top stories, capped at `max_stories` items."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]
def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    """Render the top stories as a markdown bullet list of `[title](url)` links."""
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)
if __name__ == "__main__":
    # Print the current Hacker News top stories as a markdown list.
    print(hackernews_top_stories_as_markdown())
| 194 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to the HF `YosoForMaskedLM` naming.

    Replacements are applied sequentially, so later checks see earlier rewrites;
    keys without "cls" are finally namespaced under "yoso.".
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rewrite all state-dict keys in place and add derived tensors.

    Pooler/sentence-classification weights are dropped; every other key is mapped
    through `rename_key`. Also mirrors the decoder bias and materializes the
    position-id buffer (offset by the two special positions).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load a raw YOSO checkpoint, convert its state dict, and save an HF model.

    Args:
        checkpoint_path: path to the original PyTorch checkpoint (.bin/.pt).
        yoso_config_file: JSON config describing the model architecture.
        pytorch_dump_path: output directory for `save_pretrained`.
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)
    # load_state_dict returns the missing/unexpected key report; surface it for the operator.
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
    # CLI entry point: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 194 | 1 |
'''simple docstring'''
import collections
import os
import re
from pathlib import Path
_UpperCAmelCase : int = '''src/transformers'''
# Matches is_xxx_available()
_UpperCAmelCase : int = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
_UpperCAmelCase : int = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_UpperCAmelCase : Optional[int] = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
_UpperCAmelCase : Dict = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
_UpperCAmelCase : Any = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_UpperCAmelCase : str = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
_UpperCAmelCase : Tuple = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
_UpperCAmelCase : Any = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
_UpperCAmelCase : str = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
_UpperCAmelCase : Optional[int] = re.compile(r"""^\s*try:""")
# Catches a line with else:
_UpperCAmelCase : Dict = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init.

    Returns ``None`` when the line is not an ``if not is_xxx_available()``
    test, otherwise the sorted backend names joined with ``"_and_"``.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """Read an ``__init__.py`` and split the objects it declares in two halves.

    Returns a pair of dicts mapping backend name (or ``"none"``) to the list
    of objects declared in ``_import_structure`` and in the ``TYPE_CHECKING``
    section respectively, or ``None`` for inits without an import structure.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                # Strip the surrounding quotes of each object name.
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            objects.extend(
                [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            )
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    objects.extend([obj[1:-1] for obj in imports if len(obj) > 0])
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings.

    The arguments are the two dicts returned by ``parse_init``; an empty list
    means both halves declare the same objects for every backend.
    """

    def find_duplicates(seq):
        # Objects listed more than once in the same half.
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f" {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f" {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check every init under the source tree defines the same objects in both halves.

    Raises ``ValueError`` listing all mismatching inits.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Return the list of Transformers submodules (package folders and single-file modules)."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            # Only keep top-level single-file modules here.
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


# Submodules that are deliberately not registered in the main init.
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]
def check_submodules():
    """Check every submodule appears as a key of the main ``_import_structure``.

    Raises ``ValueError`` listing unregistered submodules.
    """
    # Import the repo's transformers (not an installed one) to read its import structure.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentiall re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registed in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
| 717 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render benchmark results from ``input_json_file`` as a collapsible
    Markdown table written to ``output_md_file``.

    Each metric row shows ``new / old (diff)``; missing "old"/"diff" entries
    are omitted and non-numeric values are rendered as ``None``.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))
if __name__ == "__main__":
    # CLI: format_json_to_md <input_json_file> <output_md_file>
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 474 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint config locations, keyed by model id on the Hub.
VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    """Configuration for a ViT-MSN model (``vit_msn`` model type).

    Defaults reproduce the base architecture; unknown kwargs are forwarded to
    ``PretrainedConfig``.
    """

    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 96 |
def solution(length: int = 50) -> int:
    """Project Euler 117: count the ways to fill a row of ``length`` unit
    cells with grey unit squares and tiles of length two, three and four.

    Uses the linear recurrence f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4)
    (the last cell is either grey or ends a tile of length 2-4), replacing
    the original cubic triple loop with an O(n) pass over the same values.

    >>> solution(5)
    15
    """
    ways_number = [0] * (length + 1)
    ways_number[0] = 1  # the empty row can be filled in exactly one way
    for row_length in range(1, length + 1):
        ways_number[row_length] = sum(ways_number[max(0, row_length - 4) : row_length])
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: maps submodule name -> public objects it defines.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 705 |
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that expands one placeholder token into several learned tokens.

    ``token_map`` maps each registered placeholder string to the list of
    concrete vocabulary tokens it is replaced with before tokenization.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add a token to the vocabulary, raising if it already exists."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register ``placeholder_token`` expanding to ``num_vec_per_token`` vocabulary tokens."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder in ``text`` (a string or list of strings)."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # Optionally load only a fraction of the vectors (progressive loading).
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
| 523 | 0 |
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis of ``outputs``.

    Subtracting the row maximum before exponentiating avoids overflow.
    """
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class UpperCAmelCase__(Pipeline):
    """Sequence-pair classification pipeline (upstream example name:
    ``PairClassificationPipeline``).

    Implements the four-method ``Pipeline`` contract: ``_sanitize_parameters``,
    ``preprocess``, ``_forward`` and ``postprocess``.
    """

    def _sanitize_parameters(self, **kwargs):
        # Route the optional "second_text" argument to preprocess().
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}


from collections.abc import Iterable  # noqa: E402  (used by the classes below)
from typing import Any
class Node:
    """A binary-search-tree node holding ``value`` plus parent/child links."""

    def __init__(self, value=None):
        self.value = value
        self.parent = None  # Added in order to delete a node easier
        self.left = None
        self.right = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return str(self.value)
        return pformat({f"{self.value}": (self.left, self.right)}, indent=1)
class BinarySearchTree:
    """Unbalanced binary search tree over comparable values (duplicates go right)."""

    def __init__(self, root=None):
        self.root = root

    def __str__(self):
        return str(self.root)

    def __reassign_nodes(self, node, new_children):
        """Splice ``new_children`` into ``node``'s position (used by remove)."""
        if new_children is not None:  # reset its kids
            new_children.parent = node.parent
        if node.parent is not None:  # reset its parent
            if self.is_right(node):  # If it is the right child
                node.parent.right = new_children
            else:
                node.parent.left = new_children
        else:
            self.root = new_children

    def is_right(self, node):
        """True when ``node`` is the right child of its parent."""
        if node.parent and node.parent.right:
            return node == node.parent.right
        return False

    def empty(self):
        return self.root is None

    def __insert(self, value):
        """Insert a single value, walking down from the root to a free leaf slot."""
        new_node = Node(value)  # create a new Node
        if self.empty():  # if Tree is empty
            self.root = new_node  # set its root
        else:  # Tree is not empty
            parent_node = self.root  # from root
            if parent_node is None:
                return
            while True:  # While we don't get to a leaf
                if value < parent_node.value:  # We go left
                    if parent_node.left is None:
                        parent_node.left = new_node  # We insert the new node in a leaf
                        break
                    else:
                        parent_node = parent_node.left
                else:
                    if parent_node.right is None:
                        parent_node.right = new_node
                        break
                    else:
                        parent_node = parent_node.right
            new_node.parent = parent_node

    def insert(self, *values):
        for value in values:
            self.__insert(value)

    def search(self, value):
        """Return the node holding ``value`` or None; raise IndexError on an empty tree."""
        if self.empty():
            raise IndexError("Warning: Tree is empty! please use another.")
        else:
            node = self.root
            # use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value is not value:
                node = node.left if value < node.value else node.right
            return node

    def get_max(self, node=None):
        """Right-most node of the subtree rooted at ``node`` (default: whole tree)."""
        if node is None:
            if self.root is None:
                return None
            node = self.root
        if not self.empty():
            while node.right is not None:
                node = node.right
        return node

    def get_min(self, node=None):
        """Left-most node of the subtree rooted at ``node`` (default: whole tree)."""
        if node is None:
            node = self.root
            if self.root is None:
                return None
        if not self.empty():
            node = self.root
            while node.left is not None:
                node = node.left
        return node

    def remove(self, value):
        node = self.search(value)  # Look for the node with that label
        if node is not None:
            if node.left is None and node.right is None:  # If it has no children
                self.__reassign_nodes(node, None)
            elif node.left is None:  # Has only right children
                self.__reassign_nodes(node, node.right)
            elif node.right is None:  # Has only left children
                self.__reassign_nodes(node, node.left)
            else:
                # Replace with the max of the left branch, then delete that node.
                tmp_node = self.get_max(node.left)
                self.remove(tmp_node.value)
                node.value = tmp_node.value  # keep tree structure

    def preorder_traverse(self, node):
        """Yield nodes in preorder starting from ``node``."""
        if node is not None:
            yield node  # Preorder Traversal
            yield from self.preorder_traverse(node.left)
            yield from self.preorder_traverse(node.right)

    def traversal_tree(self, traversal_function=None):
        """Traverse with ``traversal_function`` (defaults to preorder from root)."""
        if traversal_function is None:
            return self.preorder_traverse(self.root)
        else:
            return traversal_function(self.root)

    def inorder(self, arr, node):
        """Append the subtree's values to ``arr`` in sorted (inorder) order."""
        if node:
            self.inorder(arr, node.left)
            arr.append(node.value)
            self.inorder(arr, node.right)

    def find_kth_smallest(self, k, node):
        """Return the k-th smallest value (1-based) of the subtree at ``node``."""
        arr = []
        self.inorder(arr, node)  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder(curr_node: "Node | None") -> "list[Node]":
    """Return the nodes of the subtree rooted at ``curr_node`` in postorder
    (left, right, root); an empty subtree yields an empty list."""
    node_list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left) + postorder(curr_node.right) + [curr_node]
    return node_list
def snake_case() -> None:
    """Demo driver for BinarySearchTree (upstream name: ``binary_search_tree``).

    Builds a tree, runs a few queries and removals, printing the results.
    """
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i)

    # Prints all the elements of the list in order traversal
    print(t)

    if t.search(6) is not None:
        print("The value 6 exists")
    else:
        print("The value 6 doesn't exist")

    if t.search(-1) is not None:
        print("The value -1 exists")
    else:
        print("The value -1 doesn't exist")

    if not t.empty():
        print("Max Value: ", t.get_max().value)  # type: ignore
        print("Min Value: ", t.get_min().value)  # type: ignore

    for i in testlist:
        t.remove(i)
    print(t)


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
| 401 | 0 |
def print_max_activities(start, finish):
    """Greedy activity selection: print the indices of a maximum set of
    non-overlapping activities, comma separated.

    ``start``/``finish`` are parallel lists; assumes ``finish`` is sorted in
    non-decreasing order (required for the greedy choice to be optimal).
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 710 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

# Canonical checkpoint config locations, keyed by model id on the Hub.
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig(PretrainedConfig):
    """Vision-encoder configuration for BLIP-2 (upstream name: ``Blip2VisionConfig``)."""

    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from the composite Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipaQFormerConfig(PretrainedConfig):
    """Q-Former configuration for BLIP-2 (upstream name: ``Blip2QFormerConfig``)."""

    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # A cross-attention layer is inserted every `cross_attention_frequency` layers.
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from the composite Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class BlipaConfig(PretrainedConfig):
    """Composite BLIP-2 configuration holding vision, Q-Former and text
    sub-configs (upstream name: ``Blip2Config``)."""

    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        # The language model is pluggable; default to OPT when no type is given.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs):
        """Instantiate a composite config from the three sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 519 | 0 |
"""Lazy-import package initializer for the Data2Vec model family.

Declares the public symbols of the ``data2vec`` subpackage in
``_import_structure`` and defers the heavy imports to first attribute access
via ``_LazyModule``. Type checkers see the real imports under TYPE_CHECKING.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Maps submodule name -> list of public names exported from it.
_import_structure = {
    "configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
    "configuration_data2vec_text": [
        "DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecTextConfig",
        "Data2VecTextOnnxConfig",
    ],
    "configuration_data2vec_vision": [
        "DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Data2VecVisionConfig",
        "Data2VecVisionOnnxConfig",
    ],
}

# Torch-only modeling symbols are registered only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_data2vec_audio"] = [
        "DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecAudioForAudioFrameClassification",
        "Data2VecAudioForCTC",
        "Data2VecAudioForSequenceClassification",
        "Data2VecAudioForXVector",
        "Data2VecAudioModel",
        "Data2VecAudioPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_text"] = [
        "DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecTextForCausalLM",
        "Data2VecTextForMaskedLM",
        "Data2VecTextForMultipleChoice",
        "Data2VecTextForQuestionAnswering",
        "Data2VecTextForSequenceClassification",
        "Data2VecTextForTokenClassification",
        "Data2VecTextModel",
        "Data2VecTextPreTrainedModel",
    ]
    _import_structure["modeling_data2vec_vision"] = [
        "DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Data2VecVisionForImageClassification",
        "Data2VecVisionForMaskedImageModeling",
        "Data2VecVisionForSemanticSegmentation",
        "Data2VecVisionModel",
        "Data2VecVisionPreTrainedModel",
    ]

# Only the vision model has a TensorFlow port.
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
        "TFData2VecVisionForImageClassification",
        "TFData2VecVisionForSemanticSegmentation",
        "TFData2VecVisionModel",
        "TFData2VecVisionPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 460 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    """Entry point of the ``transformers-cli`` tool.

    Builds the argument parser, lets every command register its subparser,
    parses ``sys.argv``, and runs the selected command. Exits with status 1
    and prints usage when no subcommand was given.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    # Each subcommand stores a factory under `func`; absence means no subcommand.
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 460 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
# Module-level logger; the conversion function below refers to it as `logger`.
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    """Copy weights from an old-structure ProphetNet checkpoint into the new model.

    Loads the checkpoint with the legacy model classes (``transformers_old``),
    loads the same checkpoint into the current classes, then walks every key
    reported missing by the new loader and copies the matching parameter from
    the old model, translating attribute names via ``mapping``. The converted
    model is saved to ``pytorch_dump_folder_path``.

    Raises:
        ValueError: if a missing key cannot be resolved in the old model.
    """
    # XLM checkpoints use the XLMProphetNet classes; plain ones use ProphetNet.
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    # Attention projections stored fused as in_proj_weight/bias in the old model.
    special_keys = ["key_proj", "value_proj", "query_proj"]

    # new attribute name -> old attribute name ("" means: stay on the same module).
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            # Translate the attribute name; fall back to the identical name when
            # the mapped name does not exist on the old model.
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                # Old model fuses q/k/v into one (3*embed_dim, embed_dim) matrix;
                # slice out the third belonging to this projection.
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                # Fixed: these were bare tuple expressions (no-ops) before.
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    param.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    param.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            # Descend one level in both module trees for the next path component.
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase_ = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 142 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
UpperCamelCase_ = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    """Rename original SAM checkpoint keys to the HF SamModel naming scheme.

    Applies the substring renames in ``KEYS_TO_MODIFY_MAPPING``, renumbers the
    output-hypernetwork MLP layers (0 -> proj_in, 1 -> layers.0, 2 -> proj_out),
    drops the normalization buffers, and aliases the shared positional
    embedding under its top-level name.

    Args:
        state_dict (dict): raw checkpoint state dict (mutated: pixel_mean and
            pixel_std are popped).

    Returns:
        dict: new state dict with translated keys.
    """
    model_state_dict = {}
    # Normalization constants live on the image processor in HF, not the model.
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    """Convert an original SAM checkpoint to a HF ``SamModel`` and sanity-check it.

    Downloads ``checkpoints/{model_name}.pth`` from ``model_hub_id``, builds the
    matching ``SamConfig`` (vit-b/l/h), loads the translated weights, and runs a
    few inference checks on a reference image (for ``sam_vit_h_4b8939`` the IoU
    scores are compared against known-good values).

    Args:
        model_name (str): one of the ``sam_vit_*`` checkpoint names.
        pytorch_dump_folder (str): output folder (accepted for CLI symmetry;
            not used by the checks below).
        push_to_hub (bool): accepted for CLI symmetry; not used here.
        model_hub_id (str, optional): hub repo hosting the raw checkpoints.
    """
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    # Check 1: bare image, no prompts.
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668

        # Check 2: single point prompt.
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604

        # Check 3: bounding-box prompt.
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514

        # Check 4: two points on one image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
UpperCamelCase_ = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 142 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.