code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
_snake_case : Union[str, Any] = ["bert-base-uncased", "bert-base-cased"]
_snake_case : Dict = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class a (tf.keras.Model ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCamelCase : List[Any] ) -> Dict:
super().__init__()
__snake_case : List[Any] = tokenizer
__snake_case : Dict = AutoConfig.from_pretrained(lowerCamelCase )
__snake_case : Dict = TFAutoModel.from_config(lowerCamelCase )
def __snake_case ( self : Optional[int] , lowerCamelCase : Optional[Any] ) -> Optional[int]:
__snake_case : int = self.tokenizer(lowerCamelCase )
__snake_case : Dict = self.bert(**lowerCamelCase )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class a (unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Union[str, Any] ) -> Optional[Any]:
super().setUp()
__snake_case : str = [
BertTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
__snake_case : str = [TFBertTokenizer.from_pretrained(lowerCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(lowerCamelCase , use_fast_bert_tokenizer=lowerCamelCase )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__snake_case : Optional[int] = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
__snake_case : Tuple = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def __snake_case ( self : Tuple ) -> str:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
__snake_case : Optional[Any] = tokenizer(lowerCamelCase , return_tensors="tf" , padding="longest" )
__snake_case : int = tf_tokenizer(lowerCamelCase )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def __snake_case ( self : Any ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
__snake_case : List[Any] = tf_tokenizer(self.paired_sentences )
__snake_case : List[Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def __snake_case ( self : Dict ) -> Optional[int]:
for tf_tokenizer in self.tf_tokenizers:
__snake_case : Optional[int] = tf.function(lowerCamelCase )
for test_inputs in (self.test_sentences, self.paired_sentences):
__snake_case : Tuple = tf.constant(lowerCamelCase )
__snake_case : int = compiled_tokenizer(lowerCamelCase )
__snake_case : str = tf_tokenizer(lowerCamelCase )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def __snake_case ( self : Union[str, Any] ) -> str:
for tf_tokenizer in self.tf_tokenizers:
__snake_case : Optional[int] = ModelToSave(tokenizer=lowerCamelCase )
__snake_case : Any = tf.convert_to_tensor(self.test_sentences )
__snake_case : Any = model(lowerCamelCase ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__snake_case : Optional[Any] = Path(lowerCamelCase ) / "saved.model"
model.save(lowerCamelCase )
__snake_case : Tuple = tf.keras.models.load_model(lowerCamelCase )
__snake_case : Optional[Any] = loaded_model(lowerCamelCase )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1E-5 )
| 81 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )
def lowercase_ ( self : Optional[int] , _A : Any ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
# run training
estimator.fit()
# result dataframe
UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase__ : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 | 0 |
"""simple docstring"""
def a__ ( lowerCAmelCase__ = 50 ):
UpperCAmelCase_ = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(F"{solution() = }")
| 82 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
def lowercase_ ( self : Dict ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = 0.0
for i, j in zip(_A , _A ):
n_correct += 1.0 if math_equivalence.is_equiv(_A , _A ) else 0.0
UpperCAmelCase__ : Dict = n_correct / len(_A )
return {
"accuracy": accuracy,
}
| 75 | 0 |
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __snake_case ( _lowercase):
snake_case__ : Tuple = ["image_processor", "tokenizer"]
snake_case__ : Dict = "BlipImageProcessor"
snake_case__ : Optional[Any] = "AutoTokenizer"
def __init__( self : str , __lowerCAmelCase : Any , __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple ):
"""simple docstring"""
super().__init__(__lowerCAmelCase , __lowerCAmelCase )
# add QFormer tokenizer
_lowerCamelCase : Optional[Any] = qformer_tokenizer
def __call__( self : Optional[Any] , __lowerCAmelCase : ImageInput = None , __lowerCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[bool, str, PaddingStrategy] = False , __lowerCAmelCase : Union[bool, str, TruncationStrategy] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : int = 0 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , **__lowerCAmelCase : List[Any] , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError('''You have to specify at least images or text.''' )
_lowerCamelCase : int = BatchFeature()
if text is not None:
_lowerCamelCase : Union[str, Any] = self.tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
encoding.update(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.qformer_tokenizer(
text=__lowerCAmelCase , add_special_tokens=__lowerCAmelCase , padding=__lowerCAmelCase , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , stride=__lowerCAmelCase , pad_to_multiple_of=__lowerCAmelCase , return_attention_mask=__lowerCAmelCase , return_overflowing_tokens=__lowerCAmelCase , return_special_tokens_mask=__lowerCAmelCase , return_offsets_mapping=__lowerCAmelCase , return_token_type_ids=__lowerCAmelCase , return_length=__lowerCAmelCase , verbose=__lowerCAmelCase , return_tensors=__lowerCAmelCase , **__lowerCAmelCase , )
_lowerCamelCase : Optional[int] = qformer_text_encoding.pop('''input_ids''' )
_lowerCamelCase : Optional[Any] = qformer_text_encoding.pop('''attention_mask''' )
if images is not None:
_lowerCamelCase : str = self.image_processor(__lowerCAmelCase , return_tensors=__lowerCAmelCase )
encoding.update(__lowerCAmelCase )
return encoding
def SCREAMING_SNAKE_CASE ( self : List[str] , *__lowerCAmelCase : str , **__lowerCAmelCase : Dict ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowerCAmelCase , **__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , *__lowerCAmelCase : List[str] , **__lowerCAmelCase : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*__lowerCAmelCase , **__lowerCAmelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
_lowerCamelCase : str = self.tokenizer.model_input_names
_lowerCamelCase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : Dict , **__lowerCAmelCase : Tuple ):
"""simple docstring"""
if os.path.isfile(__lowerCAmelCase ):
raise ValueError(f'''Provided path ({save_directory}) should be a directory, not a file''' )
os.makedirs(__lowerCAmelCase , exist_ok=__lowerCAmelCase )
_lowerCamelCase : List[Any] = os.path.join(__lowerCAmelCase , '''qformer_tokenizer''' )
self.qformer_tokenizer.save_pretrained(__lowerCAmelCase )
return super().save_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , __lowerCAmelCase : Optional[int] , **__lowerCAmelCase : Any ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoTokenizer.from_pretrained(__lowerCAmelCase , subfolder='''qformer_tokenizer''' )
_lowerCamelCase : Optional[Any] = cls._get_arguments_from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
args.append(__lowerCAmelCase )
return cls(*__lowerCAmelCase )
| 83 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ['input_ids', 'attention_mask']
lowerCAmelCase__ = BartTokenizer
def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ):
'''simple docstring'''
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) )
UpperCAmelCase__ : Any = add_prefix_space
UpperCAmelCase__ : str = pre_tok_class(**_A )
UpperCAmelCase__ : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase__ : Optional[Any] = '''post_processor'''
UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] )
UpperCAmelCase__ : Dict = False
if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
UpperCAmelCase__ : Union[str, Any] = add_prefix_space
UpperCAmelCase__ : Dict = True
if state.get('''trim_offsets''' , _A ) != trim_offsets:
UpperCAmelCase__ : List[Any] = trim_offsets
UpperCAmelCase__ : List[Any] = True
if changes_to_apply:
UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) )
UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self : Dict , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
UpperCAmelCase__ : str = value
def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_A , **_A )
def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ):
'''simple docstring'''
UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
UpperCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 75 | 0 |
UpperCAmelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
UpperCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
UpperCAmelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 84 |
'''simple docstring'''
import random
from typing import Any
def a__ ( lowerCAmelCase__ ) -> list[Any]:
for _ in range(len(lowerCAmelCase__ ) ):
UpperCAmelCase__ : int = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
UpperCAmelCase__ : Optional[int] = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = data[b], data[a]
return data
if __name__ == "__main__":
UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 75 | 0 |
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__)
def _a ( lowercase__ : Union[tf.Tensor, np.ndarray] ):
'''simple docstring'''
if isinstance(lowercase__ , np.ndarray ):
return list(tensor.shape )
SCREAMING_SNAKE_CASE__ : str = tf.shape(lowercase__ )
if tensor.shape == tf.TensorShape(lowercase__ ):
return dynamic
SCREAMING_SNAKE_CASE__ : Optional[Any] = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(lowercase__ )]
def _a ( lowercase__ : tf.Tensor , lowercase__ : Optional[int] = None , lowercase__ : Optional[str] = None ):
'''simple docstring'''
return tf.nn.softmax(logits=logits + 1E-9 , axis=lowercase__ , name=lowercase__ )
def _a ( lowercase__ : List[Any] , lowercase__ : Optional[int] , lowercase__ : Tuple , lowercase__ : Optional[Any]=1E-5 , lowercase__ : Union[str, Any]=-1 ):
'''simple docstring'''
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase__ , lowercase__ ):
raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' )
# Get mean and variance on the axis to be normalized
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = tf.nn.moments(lowercase__ , axes=[axis] , keepdims=lowercase__ )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
SCREAMING_SNAKE_CASE__ : List[Any] = [1] * inputs.shape.rank
SCREAMING_SNAKE_CASE__ : Dict = shape_list(lowercase__ )[axis]
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.reshape(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.reshape(lowercase__ , lowercase__ )
# Compute layer normalization using the batch_normalization
# function.
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.nn.batch_normalization(
lowercase__ , lowercase__ , lowercase__ , offset=lowercase__ , scale=lowercase__ , variance_epsilon=lowercase__ , )
return outputs
def _a ( lowercase__ : List[Any] , lowercase__ : Dict=0 , lowercase__ : List[Any]=-1 ):
'''simple docstring'''
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
SCREAMING_SNAKE_CASE__ : str = tf.shape(lowercase__ )
SCREAMING_SNAKE_CASE__ : Dict = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(lowercase__ , lowercase__ )
def _a ( lowercase__ : tf.Tensor ):
'''simple docstring'''
if not isinstance(lowercase__ , tf.Tensor ):
SCREAMING_SNAKE_CASE__ : Optional[int] = tf.convert_to_tensor(lowercase__ ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
SCREAMING_SNAKE_CASE__ : str = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
SCREAMING_SNAKE_CASE__ : Any = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
SCREAMING_SNAKE_CASE__ : Any = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def _a ( lowercase__ : tf.Tensor , lowercase__ : int , lowercase__ : str = "input_ids" ):
'''simple docstring'''
tf.debugging.assert_less(
lowercase__ , tf.cast(lowercase__ , dtype=tensor.dtype ) , message=(
f'''The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase__ )}) must be smaller than the embedding '''
f'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'''
) , )
def _a ( lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = 6_45_12
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
SCREAMING_SNAKE_CASE__ : str = [x for x in data if len(lowercase__ ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
'The following attributes cannot be saved to HDF5 file because '
f'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} '''
f'''bytes: {bad_attributes}''' )
SCREAMING_SNAKE_CASE__ : Any = np.asarray(lowercase__ )
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array_split(lowercase__ , lowercase__ )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array_split(lowercase__ , lowercase__ )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(lowercase__ ):
SCREAMING_SNAKE_CASE__ : Any = chunk_data
else:
SCREAMING_SNAKE_CASE__ : Tuple = data
def _a ( lowercase__ : List[Any] , lowercase__ : int ):
'''simple docstring'''
if name in group.attrs:
SCREAMING_SNAKE_CASE__ : Dict = [n.decode('utf8' ) if hasattr(lowercase__ , 'decode' ) else n for n in group.attrs[name]]
else:
SCREAMING_SNAKE_CASE__ : List[str] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode('utf8' ) if hasattr(lowercase__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] )
chunk_id += 1
return data
def _a ( lowercase__ : Optional[int] ):
'''simple docstring'''
def _expand_single_ad_tensor(lowercase__ : Optional[int] ):
if isinstance(lowercase__ , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(lowercase__ , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , lowercase__ )
| 85 |
'''simple docstring'''
import math
def a__ ( lowerCAmelCase__ ) -> list[int]:
UpperCAmelCase__ : List[Any] = []
UpperCAmelCase__ : Dict = 2
UpperCAmelCase__ : Optional[Any] = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
UpperCAmelCase__ : str = [True] * (end + 1)
UpperCAmelCase__ : Any = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Dict = False
start += 1
prime += in_prime
UpperCAmelCase__ : Optional[int] = end + 1
UpperCAmelCase__ : str = min(2 * end , lowerCAmelCase__ )
while low <= n:
UpperCAmelCase__ : List[str] = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase__ : List[str] = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
UpperCAmelCase__ : Union[str, Any] = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase__ : Union[str, Any] = high + 1
UpperCAmelCase__ : str = min(high + end , lowerCAmelCase__ )
return prime
print(sieve(1_0**6))
| 75 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
# Fixture constants for the mocked download tests: the URL the tests
# "download", the body the mock server returns, and the sha256 of the URL
# used as the cached filename. The original bound all three to the same
# name `__a`, while the tests below read URL / CONTENT / HASH.
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class _a :
"""simple docstring"""
_lowerCamelCase : List[Any] = 2_0_0
_lowerCamelCase : Dict = {'Content-Length': '100'}
_lowerCamelCase : Any = {}
def __A ( self : int , **UpperCAmelCase : Optional[Any] ):
return [bytes(UpperCAmelCase , "utf-8" )]
def __snake_case ( *__UpperCamelCase : int ,**__UpperCamelCase : int ):
"""simple docstring"""
return MockResponse()
@pytest.mark.parametrize("urls_type" ,[str, list, dict] )
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
import requests
monkeypatch.setattr(__UpperCamelCase ,"request" ,__UpperCamelCase )
A_ = URL
if issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = url
elif issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = [url]
elif issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = {"train": url}
A_ = "dummy"
A_ = "downloads"
A_ = tmp_path
A_ = DownloadConfig(
cache_dir=os.path.join(__UpperCamelCase ,__UpperCamelCase ) ,use_etag=__UpperCamelCase ,)
A_ = DownloadManager(dataset_name=__UpperCamelCase ,download_config=__UpperCamelCase )
A_ = dl_manager.download(__UpperCamelCase )
A_ = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = [downloaded_paths]
A_ = [urls]
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
assert "train" in downloaded_paths.keys()
A_ = downloaded_paths.values()
A_ = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(__UpperCamelCase ,__UpperCamelCase ):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
A_ = Path(__UpperCamelCase )
A_ = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
A_ = downloaded_path.read_text()
assert content == CONTENT
A_ = downloaded_path.with_suffix(".json" )
assert metadata_downloaded_path.exists()
A_ = json.loads(metadata_downloaded_path.read_text() )
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type" ,[str, list, dict] )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Any ):
"""simple docstring"""
A_ = str(__UpperCamelCase )
if issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = filename
elif issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = [filename]
elif issubclass(__UpperCamelCase ,__UpperCamelCase ):
A_ = {"train": filename}
A_ = "dummy"
A_ = xz_file.parent
A_ = "extracted"
A_ = DownloadConfig(
cache_dir=__UpperCamelCase ,use_etag=__UpperCamelCase ,)
A_ = DownloadManager(dataset_name=__UpperCamelCase ,download_config=__UpperCamelCase )
A_ = dl_manager.extract(__UpperCamelCase )
A_ = paths
for extracted_paths in [extracted_paths]:
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
A_ = [extracted_paths]
A_ = [paths]
elif isinstance(__UpperCamelCase ,__UpperCamelCase ):
assert "train" in extracted_paths.keys()
A_ = extracted_paths.values()
A_ = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(__UpperCamelCase ,__UpperCamelCase ):
assert extracted_path == dl_manager.extracted_paths[input_path]
A_ = Path(__UpperCamelCase )
A_ = extracted_path.parts
assert parts[-1] == hash_url_to_filename(__UpperCamelCase ,etag=__UpperCamelCase )
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
A_ = extracted_path.read_text()
A_ = text_file.read_text()
assert extracted_file_content == expected_file_content
def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : Any ):
"""simple docstring"""
assert path.endswith(".jsonl" )
for num_items, line in enumerate(__UpperCamelCase ,start=1 ):
A_ = json.loads(line.decode("utf-8" ) )
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("archive_jsonl" ,["tar_jsonl_path", "zip_jsonl_path"] )
def __snake_case ( __UpperCamelCase : Union[str, Any] ,__UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
A_ = request.getfixturevalue(__UpperCamelCase )
A_ = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(__UpperCamelCase ) ,start=1 ):
_test_jsonl(__UpperCamelCase ,__UpperCamelCase )
assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl" ,["tar_nested_jsonl_path", "zip_nested_jsonl_path"] )
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[str] ):
"""simple docstring"""
A_ = request.getfixturevalue(__UpperCamelCase )
A_ = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(__UpperCamelCase ) ,start=1 ):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(__UpperCamelCase ) ,start=1 ):
_test_jsonl(__UpperCamelCase ,__UpperCamelCase )
assert num_tar == 1
assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    """``iter_files`` must yield exactly the two visible files and skip the
    hidden ones.

    Fixed from the original, which took ``basename`` of the directory
    argument instead of the yielded file path.
    NOTE(review): the fixture name is assumed from the suite's conftest —
    confirm against the shared fixtures.
    """
    dl_manager = DownloadManager()
    num_file = 0
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast CPU tests for ``StableDiffusionInpaintPipeline`` built from tiny,
    deterministically-seeded components.

    NOTE(review): the base-class names and several literals below were lost
    in the source (clobbered to placeholder names); they are restored from
    the mixins this file imports — confirm against the upstream test.
    """

    # Attribute names required by PipelineTesterMixin; the originals were all
    # bound to one shared placeholder name.
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        """Build tiny seeded versions of every component the pipeline needs."""
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,  # 4 latent + 4 masked-latent + 1 mask channels
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,  # NOTE(review): literal lost in source — confirm
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)  # NOTE(review): literal lost in source — confirm
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Create a deterministic init image, mask and call kwargs."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        # MPS has no per-device Generator support.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        """End-to-end smoke test against a pinned output slice."""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        # Loosen the mixin's default tolerance for this pipeline.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests for the ``stabilityai/stable-diffusion-2-inpainting``
    checkpoint. Lost placeholder literals were restored (``safety_checker=None``,
    ``disable=None``, ``torch_dtype=torch.float16``) — marked inline."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,  # fp16 variant of the test above
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # fp16 needs a much looser tolerance.
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
# --- (extraction artifact removed: "| 75 | 0 |")
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def SCREAMING_SNAKE_CASE(lowercase_):
    """Decorator: make the wrapped call return the wall-clock time (seconds)
    the call took, discarding the original return value."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        lowercase_(*args, **kwargs)  # result intentionally discarded; only timed
        delta = timeit.default_timer() - starttime
        return delta

    # Preserve the wrapped function's name for benchmark reporting — the
    # original assignment had lost its target and did nothing.
    wrapper.__name__ = lowercase_.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Create ``num_examples`` random ``(key, example)`` pairs matching ``features``.

    - ``_ArrayXD`` features get a random float array of the declared shape/dtype
    - ``Value("string")`` features get a fixed sentence; other Values a random int
    - ``Sequence`` features get a random array shaped by ``seq_shapes[column]``

    Renamed from the clobbered definition name to match its call site below
    (``generate_examples(..., num_examples=..., seq_shapes=...)``).
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequences down to the leaf feature.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data
def SCREAMING_SNAKE_CASE(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write ``num_examples`` random examples for ``features`` to an Arrow
    file at ``dataset_path`` and return the resulting ``datasets.Dataset``.

    Raises:
        ValueError: if the writer reports a different number of examples
            than requested.
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        # finalize() returns (num examples, num bytes); the original discarded
        # both, leaving the sanity check below reading an undefined name.
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
# --- (extraction artifact removed: "| 87 |")
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    # NOTE(review): the original bound the string to a throwaway module
    # constant, so the setting never reached JAX; restored the env-var target.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the kwargs dict for a Flax Blenderbot forward pass, deriving any
    masks not supplied by the caller from the token ids and model config.

    Renamed from the clobbered definition name to match its call site in the
    model tester below; duplicate parameter names were a SyntaxError.
    """
    if attention_mask is None:
        # Mask out padding positions in the encoder input.
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): mirrors the original block, which reuses the encoder
        # attention_mask here rather than decoder_attention_mask — confirm intended.
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds a tiny Blenderbot config plus random inputs and implements the
    shared KV-cache consistency checks.

    Renamed to match its instantiation in the test class below; duplicate
    placeholder parameter names and lost ``self.*`` assignment targets were
    restored from the attribute reads in the methods.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Random input ids (ending in EOS) plus a matching tiny config."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # NOTE(review): literal lost in source (`use_cache=_A`) — confirm
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding step-by-step with a KV cache must match full decoding."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same cache check, but with an explicit (padded) decoder attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    """Standalone head/shift tests for Flax Blenderbot on hand-written ids.

    Method names restored to the ``test_*`` convention so unittest discovers
    them; the clobbered class attribute is ``vocab_size`` (read below).
    """

    vocab_size = 99

    def _get_config_and_data(self):
        """Fixed batch of EOS-terminated sequences plus a tiny config."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # Encoder and decoder sequences of different lengths.
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        # Shifting right consumes exactly one pad token and prepends the
        # decoder start token (2).
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class lowerCamelCase_ ( FlaxModelTesterMixin , unittest.TestCase , FlaxGenerationTesterMixin ):
    """Full model test-suite wiring for Flax Blenderbot.

    Base classes and attribute names restored from the mixins this file
    imports; method names restored to the ``test_*`` convention so unittest
    discovers them.
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """Jitted and non-jitted encode must produce identically-shaped outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """Jitted and non-jitted decode must produce identically-shaped outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        generate_kwargs = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        tok_decode_kwargs = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained(
            "facebook/blenderbot-3B", from_pt=True  # NOTE(review): literal lost in source — confirm
        )
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **generate_kwargs)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_ids, **tok_decode_kwargs)
        assert generated_txt[0].strip() == tgt_text
# --- (extraction artifact removed: "| 75 | 0 |")
"""simple docstring"""
import functools
def _snake_case ( __snake_case : str , __snake_case : str ):
"""simple docstring"""
_lowerCamelCase : int = len(__snake_case )
_lowerCamelCase : List[Any] = len(__snake_case )
@functools.cache
def min_distance(__snake_case : int , __snake_case : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
_lowerCamelCase : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , __snake_case ) , 1 + min_distance(__snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
# --- (extraction artifact removed: "| 88 |")
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Toy Beam-based builder producing flat ``{"content": str}`` examples.

    Renamed to match its instantiation below; the builder hook names
    (``_info`` / ``_split_generators`` / ``_build_pcollection``) are the ones
    ``datasets.BeamBasedBuilder`` dispatches to and were clobbered in the
    original.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,  # restored: the original referenced an undefined name
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Toy Beam-based builder producing nested ``{"a": {"b": [str]}}`` examples.

    Builder hook names restored as in ``DummyBeamDataset`` above.
    NOTE(review): class name inferred from the parallel dummy builder —
    confirm against the test methods that instantiate it.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,  # restored: the original referenced an undefined name
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def a__ ( ) -> list:
    """Return the (key, example) pairs consumed by the dummy Beam builder.

    NOTE(review): the original ``-> Tuple`` annotation referenced an
    unimported typing name and would raise NameError at import time.
    """
    return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def a__ ( ) -> list:
    """Return the (key, example) pairs consumed by the nested Beam builder.

    NOTE(review): the original ``-> Optional[Any]`` annotation referenced
    unimported typing names and would raise NameError at import time.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( TestCase ):
    """End-to-end tests for the Beam-based dataset builders above.

    NOTE(review): the minified original inherited from the undefined name
    ``__a``; ``TestCase`` (imported at the top of this module) is the
    intended base.  All four methods shared one name and every local was
    collapsed to ``UpperCAmelCase__``; discoverable ``test_*`` names and
    the original locals are restored.  ``DummyBeamDataset`` /
    ``NestedBeamDataset`` / ``get_test_*_examples`` are the
    pre-minification names of the fixtures defined earlier in this module.
    """

    @require_beam
    def test_download_and_prepare(self):
        """A DirectRunner build materialises the train split on disk."""
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features, datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, expected_num_examples )
            self.assertDictEqual(dset['''train'''][0], get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        """Sharded parquet writing yields one arrow file per shard."""
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner='''DirectRunner''' )
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            # NOTE(review): the original asserted shard 00000 twice; this second
            # assertion is meant to cover the second shard file.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features, datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content'''] ), sorted(['''foo''', '''bar''', '''foobar'''] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def test_no_beam_options(self):
        """Preparing without a beam runner must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare )

    @require_beam
    def test_nested_features(self):
        """Nested (sequence) features survive the Beam write/read round-trip."""
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features, datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows, expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples, expected_num_examples )
            self.assertDictEqual(dset['''train'''][0], get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, '''default''', '''0.0.0''', '''dataset_info.json''' ) ) )
            del dset
| 75 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
def UpperCamelCase_( lowerCamelCase_ ):
    """Build the ``DetrConfig`` for a checkpoint name.

    Args:
        lowerCamelCase_: the model name, e.g. ``"detr-resnet-50"``.

    Returns:
        ``(config, is_panoptic)`` where ``is_panoptic`` reflects whether the
        name contains ``"panoptic"``.

    NOTE(review): the minified original assigned every local to
    ``_lowercase`` and keyed the id2label comprehension on the *model name*
    instead of ``k``; the intended bindings are restored below.
    """
    model_name = lowerCamelCase_
    # initialize config
    if "resnet-50" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-50' )
    elif "resnet-101" in model_name:
        backbone_config = ResNetConfig.from_pretrained('microsoft/resnet-101' )
    else:
        raise ValueError('Model name should include either resnet50 or resnet101' )
    # upstream converts from the native (non-timm) ResNet backbone
    config = DetrConfig(use_timm_backbone=False, backbone_config=backbone_config )
    # set label attributes
    is_panoptic = 'panoptic' in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    repo_id = 'huggingface/label-files'
    filename = 'coco-detection-id2label.json'
    # use a context manager so the label file handle is not leaked
    with open(hf_hub_download(repo_id, filename, repo_type='dataset' ), 'r' ) as f:
        idalabel = json.load(f )
    idalabel = {int(k): v for k, v in idalabel.items()}  # was int(model_name): bug
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config, is_panoptic
def UpperCamelCase_( config ):
    """List (original_key, hf_key) renames for a DETR state dict.

    Args:
        config: a DETR config exposing ``backbone_config.depths`` and
            ``encoder_layers``.

    Returns:
        A list of ``(src, dest)`` key pairs, in the same order as the
        original hand-written list.

    NOTE(review): the minified original assigned the list to ``_lowercase``
    but appended to the (undefined) name ``rename_keys``; the binding is
    restored and the repetitive literals are generated by loops that emit
    keys in the exact original order.
    """
    rename_keys = []

    bn_stats = ("weight", "bias", "running_mean", "running_var")

    # stem: first convolution + its batch norm
    rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight"))
    for stat in bn_stats:
        rename_keys.append(
            (f"backbone.0.body.bn1.{stat}", f"backbone.conv_encoder.model.embedder.embedder.normalization.{stat}")
        )

    # stages of the ResNet backbone
    for stage_idx in range(len(config.backbone_config.depths)):
        for layer_idx in range(config.backbone_config.depths[stage_idx]):
            src = f"backbone.0.body.layer{stage_idx + 1}.{layer_idx}"
            dst = f"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}"
            # the projection shortcut only exists on the first block of a stage
            if layer_idx == 0:
                rename_keys.append((f"{src}.downsample.0.weight", f"{dst}.shortcut.convolution.weight"))
                for stat in bn_stats:
                    rename_keys.append((f"{src}.downsample.1.{stat}", f"{dst}.shortcut.normalization.{stat}"))
            # the three convolutions (+ batch norms) of each bottleneck block
            for i in range(3):
                rename_keys.append((f"{src}.conv{i + 1}.weight", f"{dst}.layer.{i}.convolution.weight"))
                for stat in bn_stats:
                    rename_keys.append((f"{src}.bn{i + 1}.{stat}", f"{dst}.layer.{i}.normalization.{stat}"))

    for i in range(config.encoder_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        for src_mod, dst_mod in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "final_layer_norm"),
        ):
            for stat in ("weight", "bias"):
                rename_keys.append(
                    (f"transformer.encoder.layers.{i}.{src_mod}.{stat}", f"encoder.layers.{i}.{dst_mod}.{stat}")
                )
        # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
        for src_mod, dst_mod in (
            ("self_attn.out_proj", "self_attn.out_proj"),
            ("multihead_attn.out_proj", "encoder_attn.out_proj"),
            ("linear1", "fc1"),
            ("linear2", "fc2"),
            ("norm1", "self_attn_layer_norm"),
            ("norm2", "encoder_attn_layer_norm"),
            ("norm3", "final_layer_norm"),
        ):
            for stat in ("weight", "bias"):
                rename_keys.append(
                    (f"transformer.decoder.layers.{i}.{src_mod}.{stat}", f"decoder.layers.{i}.{dst_mod}.{stat}")
                )

    # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
    rename_keys.extend(
        [
            ('input_proj.weight', 'input_projection.weight'),
            ('input_proj.bias', 'input_projection.bias'),
            ('query_embed.weight', 'query_position_embeddings.weight'),
            ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
            ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
            ('class_embed.weight', 'class_labels_classifier.weight'),
            ('class_embed.bias', 'class_labels_classifier.bias'),
            ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
            ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
            ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
            ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
            ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
            ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ] )
    return rename_keys
def UpperCamelCase_( state_dict, old, new ):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    NOTE(review): the minified original declared three parameters with one
    name (a SyntaxError) and dropped the re-insertion of the popped value.
    """
    val = state_dict.pop(old )
    state_dict[new] = val
def UpperCamelCase_( state_dict, is_panoptic=False ):
    """Split fused in_proj q/k/v weights into separate HF q/k/v entries.

    Operates on ``state_dict`` in place.  DETR uses hidden size 256, so the
    fused 768-row projection splits into rows [:256] (query), [256:512]
    (key) and [-256:] (value).

    NOTE(review): duplicate parameter names and the destination-key
    assignments were lost in the minified original; restored per the
    upstream conversion script.
    """
    prefix = ""
    if is_panoptic:
        prefix = "detr."
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6 ):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def UpperCamelCase_( ):
    """Download and return the standard COCO cats image used for verification.

    NOTE(review): the minified original passed an undefined name for the
    url and the ``stream`` flag; upstream streams the fixed URL below.
    """
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def UpperCamelCase_( model_name, pytorch_dump_folder_path=None, push_to_hub=False ):
    """Convert an original torch-hub DETR checkpoint to the HF Transformers format.

    Args:
        model_name: one of ``detr-resnet-50`` / ``detr-resnet-101`` (optionally panoptic).
        pytorch_dump_folder_path: if given, save the converted model/processor there.
        push_to_hub: if True, upload model and processor to the hub.

    NOTE(review): the minified original declared three identically named
    parameters (a SyntaxError) and lost every local binding; this restores
    the upstream conversion flow.  The helpers called below are the
    pre-minification names used elsewhere in this module.
    """
    config, is_panoptic = get_detr_config(model_name )
    # load original model from torch hub
    model_name_to_original_name = {
        'detr-resnet-50': 'detr_resnet50',
        'detr-resnet-101': 'detr_resnet101',
    }
    logger.info(f'''Converting model {model_name}...''' )
    detr = torch.hub.load('facebookresearch/detr', model_name_to_original_name[model_name], pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = 'detr.' + src
        rename_key(state_dict, src, dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'detr.model.' if is_panoptic else 'model.'
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('detr' )
                and not key.startswith('class_labels_classifier' )
                and not key.startswith('bbox_predictor' )
            ):
                val = state_dict.pop(key )
                state_dict['detr.model' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['detr.' + key] = val
            elif key.startswith('bbox_attention' ) or key.startswith('mask_head' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    image_format = 'coco_panoptic' if is_panoptic else 'coco_detection'
    processor = DetrImageProcessor(format=image_format )
    encoding = processor(images=prepare_img(), return_tensors='pt' )
    pixel_values = encoding['pixel_values']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits, original_outputs['pred_logits'], atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes, original_outputs['pred_boxes'], atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs['pred_masks'], atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('Uploading PyTorch model and image processor to the hub...' )
        model.push_to_hub(f'''nielsr/{model_name}''' )
        processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
    # NOTE(review): the minified original assigned the parser/args to a name
    # it never read and then referenced the undefined `parser`, `args` and
    # `convert_detr_checkpoint`; the bindings are restored and the call goes
    # to the converter defined above (the last binding of `UpperCamelCase_`).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="detr-resnet-50",
        type=str,
        choices=["detr-resnet-50", "detr-resnet-101"],
        help="Name of the DETR model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
    args = parser.parse_args()
    UpperCamelCase_(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 89 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# Help/description text shown for the `accelerate config` sub-command.
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ):
    """Ask which compute environment is in use and gather the matching config.

    Returns the config object produced by the SageMaker or cluster questionnaire.

    NOTE(review): the minified original assigned both locals to a name it
    never read and then referenced the undefined `compute_environment` and
    `config`; the bindings are restored (all callees are imported above).
    """
    compute_environment = _ask_options(
        '''In which compute environment are you running?''', ['''This machine''', '''AWS (Amazon SageMaker)'''], _convert_compute_environment, )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def a__ ( lowerCAmelCase__=None ):
    """Create the argparse parser for the `accelerate config` command.

    Args:
        lowerCAmelCase__: optional argparse subparsers action to attach to;
            when None a standalone parser is built.

    NOTE(review): the minified original left `parser` unbound, used the
    subparsers object as the description/default/func values, and dropped
    the module description constant; restored per upstream accelerate.
    """
    if lowerCAmelCase__ is not None:
        parser = lowerCAmelCase__.add_parser('''config''', description=UpperCamelCase__ )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''', description=UpperCamelCase__ )
    parser.add_argument(
        '''--config_file''', default=None, help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ), )
    if lowerCAmelCase__ is not None:
        # upstream wires the command entry point here; in this minified module
        # that function was renamed to `a__` -- TODO confirm the binding.
        parser.set_defaults(func=config_command )
    return parser
def a__ ( lowerCAmelCase__ ):
    """Run the interactive questionnaire and save the resulting config file.

    Args:
        lowerCAmelCase__: the parsed argparse namespace (reads `.config_file`).

    NOTE(review): the minified original referenced the undefined `args`,
    `config` and `config_file` and checked `isdir` on the namespace instead
    of the imported `cache_dir`; bindings restored per upstream accelerate.
    The questionnaire entry point was renamed to `a__` by minification.
    """
    config = get_user_input()
    if lowerCAmelCase__.config_file is not None:
        config_file = lowerCAmelCase__.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ):
    """CLI entry point: build the parser, parse args, run the config command.

    NOTE(review): the minified original left `parser` and `args` unbound;
    `config_command_parser` / `config_command` are the pre-minification
    names of the helpers defined above (both renamed to `a__`).
    """
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
    # NOTE(review): `main` does not exist in this minified module; the entry
    # point defined immediately above is the current binding of `a__`.
    a__()
| 75 | 0 |
'''simple docstring'''
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase = '''▁'''
__UpperCAmelCase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class a__ ( a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : int = BertGenerationTokenizer
lowercase__ : Optional[int] = False
lowercase__ : Tuple = True
def __SCREAMING_SNAKE_CASE ( self ) -> int:
super().setUp()
lowerCAmelCase__ = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self ) -> Tuple:
lowerCAmelCase__ = '''<s>'''
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<unk>''' )
self.assertEqual(vocab_keys[1] , '''<s>''' )
self.assertEqual(vocab_keys[-1] , '''<pad>''' )
self.assertEqual(len(lowerCamelCase_ ) , 10_02 )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = BertGenerationTokenizer(lowerCamelCase_ , keep_accents=lowerCamelCase_ )
lowerCAmelCase__ = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(lowerCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) , [2_85, 46, 10, 1_70, 3_82] , )
lowerCAmelCase__ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(lowerCamelCase_ )
self.assertListEqual(
lowerCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __SCREAMING_SNAKE_CASE ( self ) -> str:
return BertGenerationTokenizer.from_pretrained('''google/bert_for_seq_generation_L-24_bbc_encoder''' )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = '''Hello World!'''
lowerCAmelCase__ = [1_85_36, 22_60, 1_01]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase__ = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(lowerCamelCase_ , self.big_tokenizer.encode(lowerCamelCase_ ) )
@require_torch
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase__ = ''' '''.join(lowerCamelCase_ )
lowerCAmelCase__ = self.big_tokenizer.encode_plus(lowerCamelCase_ , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = self.big_tokenizer.batch_encode_plus(
[sequence + ''' ''' + sequence] , return_tensors='''pt''' , return_token_type_ids=lowerCamelCase_ )
lowerCAmelCase__ = BertGenerationConfig()
lowerCAmelCase__ = BertGenerationEncoder(lowerCamelCase_ )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCamelCase_ )
model(**lowerCamelCase_ )
@slow
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
# fmt: off
lowerCAmelCase__ = {'''input_ids''': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCamelCase_ , model_name='''google/bert_for_seq_generation_L-24_bbc_encoder''' , revision='''c817d1fd1be2ffa69431227a1fe320544943d4db''' , ) | 90 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path) -> None:
    """Convert a TensorFlow GPT-2 checkpoint to a PyTorch model directory.

    Args:
        gpta_checkpoint_path: Path to the TensorFlow checkpoint.
        gpta_config_file: Path to a config JSON file; "" means use the default GPT-2 config.
        pytorch_dump_folder_path: Directory receiving the weights and config files.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from the TF checkpoint into the freshly constructed model.
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model.  Fixed: the original referenced undefined names
    # `model`/`config` and passed the wrong paths to torch.save/open.
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--gpt2_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    args = parser.parse_args()
    # Fixed: argparse stores these under the dashed option names (gpt2_*), not gpta_*.
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 75 | 0 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n`` (all divisors except ``n``).

    Fixed: the original was named ``_snake_case`` while its caller referenced
    ``sum_of_divisors``.
    """
    total = 0
    # Walk divisor pairs (i, n // i) up to sqrt(n); count a square root only once.
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    # The loop counted n itself via the pair (1, n), so remove it.
    return total - n


def solution(limit: int = 10_000) -> int:
    """Return the sum of all amicable numbers below ``limit`` (Project Euler 21).

    A pair (a, b) with a != b is amicable when sum_of_divisors(a) == b and
    sum_of_divisors(b) == a.  Fixed: the original def was also named
    ``_snake_case`` (shadowing the helper) while the script called ``solution``.
    """
    total = 0
    for candidate in range(1, limit):
        partner = sum_of_divisors(candidate)  # hoisted: original evaluated this three times
        if partner != candidate and sum_of_divisors(partner) == candidate:
            total += candidate
    return total
if __name__ == "__main__":
    # Read an integer limit from stdin and print the sum of amicable numbers below it.
    print(solution(int(str(input()).strip())))
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    Fixed: the original was named ``a__`` while the test calls ``prepare_img()``,
    and it returned the undefined name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test for TFLayoutLMvaModel against the released checkpoint."""

    @cached_property
    def default_image_processor( self ):
        '''Image processor fixture; OCR is disabled because the test supplies its own boxes.'''
        # Fixed: both methods of this class were named ``lowercase_`` (the second
        # shadowed the first) and ``_A`` was an undefined placeholder.
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def test_inference_no_head( self ):
        '''Run a forward pass of the base model and verify shape plus a logits slice.'''
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='''tf''' ).pixel_values

        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ), axis=0 )

        # forward pass (training=False for deterministic inference behaviour)
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False )

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape )

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4 ) )
| 75 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
UpperCamelCase_ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the sub-module of ``hf_pointer`` addressed by dotted ``key``.

    Args:
        hf_pointer: Root HF module to descend into.
        key: Dotted attribute path (e.g. "encoder.layers.3.self_attn.linear_q").
        value: Tensor taken from the fairseq state dict.
        full_name: Original fairseq parameter name, used only for error/log messages.
        weight_type: Attribute of the target module that receives the tensor
            ("weight", "bias", "running_mean", ...) or None for the module itself.

    Raises:
        ValueError: If the target tensor's shape does not match ``value``.
    """
    # Fixed: the scrambled original declared five parameters with the same name
    # (a SyntaxError) and assigned every branch to a dead local, so nothing was
    # ever written into the HF model.  It is called as ``set_recursively``.
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Map every parameter of a fairseq wav2vec2-conformer model onto ``hf_model``.

    Args:
        fairseq_model: Source fairseq model whose state dict is read.
        hf_model: Target HF conformer model that is written to.
        is_headless: Kept for interface compatibility with the conversion entry
            point; the mapping itself is driven by MAPPING / TOP_LEVEL_KEYS.
    """
    # Fixed: duplicate parameter names (SyntaxError) and locals collapsed to a
    # single name in the scrambled original; called as ``recursively_load_weights``.
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # NOTE(review): made the attribute consistent with the "wav2vec2_conformer."
    # key prefix used below; the scrambled original read ``wavaveca_conformer``.
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                prefixed_key = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in prefixed_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        prefixed_key = prefixed_key.replace('''*''', layer_index )
                    # Decide which attribute of the target module receives the tensor.
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "inv_freq" in name:
                        weight_type = '''inv_freq'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, prefixed_key, value, name, weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one fairseq conv-feature-extractor tensor into ``feature_extractor``.

    Args:
        full_name: Full fairseq parameter name (contains "conv_layers.<id>.<type>").
        value: Tensor to copy.
        feature_extractor: HF feature extractor holding ``conv_layers``.
        unused_weights: Accumulator for names that could not be mapped.
        use_group_norm: Whether the config uses group norm (affects which
            type_id == 2 entries map onto a norm layer).

    Raises:
        ValueError: If a tensor's shape does not match its destination.
    """
    # Fixed: duplicate parameter names (SyntaxError) and dead-local assignments
    # in the scrambled original; called as ``load_conv_layer``.
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        # type 0 entries are the convolution's own weight/bias.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 entries are the (group/layer) norm's weight/bias.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq wav2vec2-conformer checkpoint to a HF model directory.

    Args:
        checkpoint_path: Path to the fairseq checkpoint.
        pytorch_dump_folder_path: Output directory (must exist for fine-tuned models).
        config_path: Optional HF config.json to start from.
        dict_path: Optional fairseq dictionary (only used when fine-tuned).
        is_finetuned: Whether the checkpoint has a CTC head.
    """
    # Fixed: duplicate parameter names (SyntaxError) and dead-local assignments
    # in the scrambled original; called as ``convert_wavaveca_conformer_checkpoint``.
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act='''swish''' )
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # Fixed: the original dropped this into a dead local instead of the config.
        config.position_embeddings_type = '''rotary'''

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True )
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path, '''w''', encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict, vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='''|''', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )

        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task )

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: parse paths and run the conversion.
    # Fixed: the original assigned the parser and parsed args to ``UpperCamelCase_``
    # but then referenced the undefined names ``parser`` and ``args``.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 92 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # fixed: the package is ``bs4``, not ``bsa``
from fake_useragent import UserAgent

if __name__ == "__main__":
    # Use the CLI arguments as the query if given, otherwise prompt the user.
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = F"""https://www.google.com/search?q={query}&num=100"""
    # A random User-Agent reduces the chance of Google serving a bot page.
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        # Standard result layout: the first organic hit lives in a ``yuRUbf`` div.
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        # Fallback layout: the href is a redirect URL whose real target is in
        # the ``url`` query parameter.
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
| 75 | 0 |
"""simple docstring"""
from __future__ import annotations
def __A (_SCREAMING_SNAKE_CASE ) ->float:
"""simple docstring"""
if not nums:
raise ValueError('List is empty' )
return sum(_SCREAMING_SNAKE_CASE ) / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings when executed as a script.
    import doctest
    doctest.testmod()
| 93 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a__ ( img, pta, ptb, rows, cols ) -> np.ndarray:
    """Apply the affine transform mapping triangle ``pta`` to ``ptb`` to ``img``.

    Args:
        img:  source image (grayscale or color ndarray).
        pta:  3x2 float array of source points.
        ptb:  3x2 float array of destination points.
        rows: output height in pixels.
        cols: output width in pixels.

    Returns:
        The warped image of size (cols x rows).
    """
    # Fix: all five parameters originally shared one mangled name (a syntax
    # error), and the body read undefined `rows`/`cols`.
    matrix = cva.getAffineTransform(pta, ptb )
    return cva.warpAffine(img, matrix, (rows, cols) )
if __name__ == "__main__":
    # Demo: load an image, rotate it with three different affine transforms,
    # and show the results in a 2x2 matplotlib grid.
    # Fix: assignment targets were mangled (`gray_img`, `images`, `titles`
    # undefined), all four point arrays shared one name, and `np.floataa` is
    # not a valid dtype (presumably np.float32).
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    # NOTE(review): `a__` is the get_rotation helper defined above; the
    # source/destination pairings below were mangled — TODO confirm pairing.
    images = [
        gray_img,
        a__(gray_img, pts1, pts2, img_rows, img_cols),
        a__(gray_img, pts2, pts3, img_rows, img_cols),
        a__(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, img in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(img, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 75 | 0 |
'''simple docstring'''
def lowercase_ ( __A : list ) -> int:
    """Return the minimum absolute difference between the sums of two subsets
    that partition ``__A`` (classic partition DP).

    Assumes non-negative integers; an empty input yields 0.
    """
    # Fix: assignment targets were mangled, leaving `n`, `s`, `arr`, `dp` and
    # `diff` undefined where they were read.
    n = len(__A )
    s = sum(__A )
    # dp[i][j] is True when some subset of the first i items sums to j.
    dp = [[False for _ in range(s + 1 )] for _ in range(n + 1 )]
    for i in range(n + 1 ):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1 , n + 1 ):
        for j in range(1 , s + 1 ):
            dp[i][j] = dp[i - 1][j]
            if __A[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - __A[i - 1]]
    # The best split puts a subset sum as close to s/2 as possible.
    for j in range(s // 2 , -1 , -1 ):
        if dp[n][j]:
            return s - 2 * j
    return s  # unreachable: dp[n][0] is always True
| 94 |
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issue labels that exempt an issue from the stale bot below.
# Fix: the function below reads `LABELS_TO_EXEMPT`, but the constant was
# bound to a mangled placeholder name.
LABELS_TO_EXEMPT = [
    'good first issue',
    'good second issue',
    'good difficult issue',
    'feature request',
    'new model',
    'wip',
]
def a__ ( ) -> None:
    """Close or mark as stale inactive GitHub issues on huggingface/transformers.

    Requires a GITHUB_TOKEN environment variable. Issues whose last comment is
    by the bot and that have been quiet for >7 days are closed; other issues
    quiet for >23 days get a stale comment. Exempt labels are skipped.
    """
    # Fixes: mangled assignment targets left `g`/`repo`/`open_issues`/
    # `comments`/`last_comment` undefined; the sort lambda's parameter did not
    # match the name it used; `reverse=` received a mangled placeholder
    # (newest-comment-first requires reverse=True); the `-> List[str]`
    # annotation referenced an unimported name.
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/transformers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments() , key=lambda c: c.created_at , reverse=True )
        last_comment = comments[0] if comments else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state='closed' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.' )
if __name__ == "__main__":
    # Fix: `main` is not defined in this module; the stale-bot entry point
    # above is named `a__`.
    a__()
| 75 | 0 |
"""simple docstring"""
import re
def snake_case ( A__ ):
    """Return True when ``A__`` is a valid Indian mobile number.

    Accepts an optional +91 (with - or space), a leading 0, or a 91 prefix,
    followed by 10 digits starting with 7, 8 or 9.
    """
    # Fix: the compiled pattern was never used (search was called with the
    # input as both arguments) and the body compared against the undefined
    # name `phone` instead of the parameter.
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$" )
    if match := re.search(pat , A__ ):
        # match.string is the searched input; equality confirms a full match.
        return match.string == A__
    return False
if __name__ == "__main__":
    # Fix: `indian_phone_validator` is not defined here; the validator above
    # is named `snake_case`.
    print(snake_case('+918827897895'))
| 95 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( DiffusionPipeline ):
    """Unconditional audio generation pipeline: denoises random noise with a
    1D UNet and a scheduler to produce audio clips.

    NOTE(review): the original base class (`__a`), parameter names (five
    parameters all named `_A` — a syntax error) and local assignment targets
    were mangled; they are restored here from the structure of the surviving
    code (`DiffusionPipeline` is imported at the top of this file, and the
    bodies read `audio`, `sample_size`, `original_sample_size`, ...).
    """

    def __init__( self , unet , scheduler ):
        super().__init__()
        # Registers the components so they are tracked for save/load.
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , batch_size: int = 1 , num_inference_steps: int = 100 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , audio_length_in_s: Optional[float] = None , return_dict: bool = True , ):
        """Generate ``batch_size`` audio clips of ``audio_length_in_s`` seconds.

        Returns an AudioPipelineOutput (or a plain tuple when
        ``return_dict=False``) holding a numpy array of shape
        (batch, channels, samples) clamped to [-1, 1].
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate
        sample_size = audio_length_in_s * self.unet.config.sample_rate
        # Each up-block halves the resolution once, so the sample length must
        # be a multiple of 2**len(up_blocks).
        down_scale_factor = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        original_sample_size = int(sample_size )
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple of the downscale factor.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ' process.' )
        sample_size = int(sample_size )
        dtype = next(iter(self.unet.parameters() ) ).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator , list ) and len(generator ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        audio = randn_tensor(shape , generator=generator , device=self.device , dtype=dtype )
        # set step values
        self.scheduler.set_timesteps(num_inference_steps , device=audio.device )
        # NOTE(review): the original assignment target was mangled; presumably
        # the timesteps are moved to the model dtype in place — TODO confirm.
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            model_output = self.unet(audio , t ).sample
            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output , t , audio ).prev_sample
        audio = audio.clamp(-1 , 1 ).float().cpu().numpy()
        # Trim the padding added to satisfy the downscale-factor constraint.
        audio = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio )
| 75 | 0 |
"""simple docstring"""
from __future__ import annotations
import requests
# Reddit post fields that may be requested via `wanted_data` in the fetcher
# below. Fix: the function reads `valid_terms`, but the constant was bound to
# a mangled placeholder name.
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def a ( subreddit: str , limit: int = 1 , age: str = "new" , wanted_data: list | None = None ) -> dict:
    """Fetch up to ``limit`` posts from ``r/<subreddit>`` sorted by ``age``.

    When ``wanted_data`` is given, only those fields are kept per post; the
    field names are validated against the module-level ``valid_terms`` set.

    Raises:
        ValueError: if ``wanted_data`` contains an unknown field.
        requests.HTTPError: when rate limited (HTTP 429).
    """
    # Fixes: all four parameters originally shared one mangled name (a syntax
    # error) and the response/json assignment targets were mangled, leaving
    # `data` and `data_dict` undefined where they were read.
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = f'Invalid search term: {invalid_search_terms}'
        raise ValueError(msg )
    response = requests.get(
        f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"User-agent": "A random string"} , )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}
    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try after some time.
    # Fix: `get_subreddit_data` is not defined here; the fetcher above is
    # named `a`.
    print(a('learnpython', wanted_data=['title', 'url', 'selftext']))
'''simple docstring'''
from math import factorial
def a__ ( successes , trials , prob ) -> float:
    """Binomial pmf: probability of exactly ``successes`` successes in
    ``trials`` independent trials, each succeeding with probability ``prob``.

    Raises:
        ValueError: on successes > trials, negative counts, non-integer
            counts, or prob outside (0, 1).
    """
    # Fixes: all three parameters originally shared one mangled name (a
    # syntax error), and the type checks were isinstance(x, x).
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient: n! / (k! (n-k)!)
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod
    testmod()
    print('Probability of 2 successes out of 4 trails')
    print('with probability of 0.75 is:', end=' ')
    # Fix: `binomial_distribution` is not defined here; the pmf above is
    # named `a__`.
    print(a__(2, 4, 0.75))
| 75 | 0 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
__a = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase__( datasets.BuilderConfig ):
    """BuilderConfig for the JSON loader.

    Fix: all eight fields were mangled to the single name ``a`` (each
    rebinding the previous one); the names below are restored to match the
    builder's ``self.config.features`` / ``.encoding`` / ``.encoding_errors``
    / ``.field`` / ``.use_threads`` / ``.block_size`` / ``.chunksize`` /
    ``.newlines_in_values`` accesses later in this file.
    """

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class lowercase__( datasets.ArrowBasedBuilder ):
    """Arrow-based builder that parses JSON / JSON-Lines files into Arrow tables.

    NOTE(review): identifiers in this module were mangled. All four methods
    below share the name ``_lowercase`` (each ``def`` shadows the previous
    one) and many assignment targets are the bare placeholder ``lowercase_``,
    so the reads of ``data_files``, ``files``, ``splits``, ``dataset``,
    ``batch``, ``batch_idx``, ``block_size`` etc. reference names that are
    never bound as written. The code is left byte-identical here; only
    documentation was added.
    """

    # NOTE(review): presumably the builder's config-class hook
    # (``BUILDER_CONFIG_CLASS``); ``JsonConfig`` is not defined under that
    # name in this module — TODO confirm.
    a :Any = JsonConfig
    def _lowercase ( self : Dict ) -> Optional[int]:
        # Validates deprecated/unsupported config options, then returns the
        # DatasetInfo (presumably the ``_info`` hook).
        if self.config.block_size is not None:
            logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
            # NOTE(review): mangled target; presumably
            # ``self.config.chunksize = self.config.block_size``.
            lowercase_ = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                '''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
        if self.config.newlines_in_values is not None:
            raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
        return datasets.DatasetInfo(features=self.config.features )
    def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
        # Builds SplitGenerators from the configured data files.
        # NOTE(review): the parameter is used as ``dl_manager`` in the body;
        # hidden targets were presumably ``data_files``/``files``/``splits``.
        if not self.config.data_files:
            raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
        lowercase_ = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(SCREAMING_SNAKE_CASE_ , (str, list, tuple) ):
            lowercase_ = data_files
            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                lowercase_ = [files]
            lowercase_ = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        lowercase_ = []
        for split_name, files in data_files.items():
            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
                lowercase_ = [files]
            lowercase_ = [dl_manager.iter_files(SCREAMING_SNAKE_CASE_ ) for file in files]
            splits.append(datasets.SplitGenerator(name=SCREAMING_SNAKE_CASE_ , gen_kwargs={'''files''': files} ) )
        return splits
    def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : pa.Table ) -> pa.Table:
        # Casts a table to the configured features' Arrow schema, adding any
        # missing columns as all-null arrays first.
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features ) - set(pa_table.column_names ):
                lowercase_ = self.config.features.arrow_schema.field(SCREAMING_SNAKE_CASE_ ).type
                lowercase_ = pa_table.append_column(SCREAMING_SNAKE_CASE_ , pa.array([None] * len(SCREAMING_SNAKE_CASE_ ) , type=SCREAMING_SNAKE_CASE_ ) )
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            lowercase_ = table_cast(SCREAMING_SNAKE_CASE_ , self.config.features.arrow_schema )
        return pa_table
    def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple ) -> List[str]:
        # Generator over (key, Arrow table) pairs parsed from the given files.
        # NOTE(review): the parameter is presumably the iterable of file
        # lists; hidden targets include ``dataset``, ``keys``, ``mapping``,
        # ``pa_table``, ``batch``, ``batch_idx``, ``block_size`` and
        # ``encoding_errors``.
        for file_idx, file in enumerate(itertools.chain.from_iterable(SCREAMING_SNAKE_CASE_ ) ):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(SCREAMING_SNAKE_CASE_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                    lowercase_ = json.load(SCREAMING_SNAKE_CASE_ )
                # We keep only the field we are interested in
                lowercase_ = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
                    lowercase_ = set().union(*[row.keys() for row in dataset] )
                    lowercase_ = {col: [row.get(SCREAMING_SNAKE_CASE_ ) for row in dataset] for col in keys}
                else:
                    lowercase_ = dataset
                lowercase_ = pa.Table.from_pydict(SCREAMING_SNAKE_CASE_ )
                yield file_idx, self._cast_table(SCREAMING_SNAKE_CASE_ )
            # If the file has one json object per line
            else:
                with open(SCREAMING_SNAKE_CASE_ , '''rb''' ) as f:
                    lowercase_ = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    lowercase_ = max(self.config.chunksize // 3_2 , 1_6 << 1_0 )
                    lowercase_ = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
                    )
                    while True:
                        lowercase_ = f.read(self.config.chunksize )
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(SCREAMING_SNAKE_CASE_ )
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            lowercase_ = batch.decode(self.config.encoding , errors=SCREAMING_SNAKE_CASE_ ).encode('''utf-8''' )
                        try:
                            while True:
                                try:
                                    lowercase_ = paj.read_json(
                                        io.BytesIO(SCREAMING_SNAKE_CASE_ ) , read_options=paj.ReadOptions(block_size=SCREAMING_SNAKE_CASE_ ) )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(SCREAMING_SNAKE_CASE_ , pa.ArrowInvalid )
                                        and "straddling" not in str(SCREAMING_SNAKE_CASE_ )
                                        or block_size > len(SCREAMING_SNAKE_CASE_ )
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f'''Batch of {len(SCREAMING_SNAKE_CASE_ )} bytes couldn\'t be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.''' )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    SCREAMING_SNAKE_CASE_ , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
                                    lowercase_ = json.load(SCREAMING_SNAKE_CASE_ )
                            except json.JSONDecodeError:
                                logger.error(f'''Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}''' )
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): # list is the only sequence type supported in JSON
                                try:
                                    lowercase_ = set().union(*[row.keys() for row in dataset] )
                                    lowercase_ = {col: [row.get(SCREAMING_SNAKE_CASE_ ) for row in dataset] for col in keys}
                                    lowercase_ = pa.Table.from_pydict(SCREAMING_SNAKE_CASE_ )
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f'''Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}''' )
                                    raise ValueError(f'''Not able to read records in the JSON file at {file}.''' ) from None
                                yield file_idx, self._cast_table(SCREAMING_SNAKE_CASE_ )
                                break
                            else:
                                logger.error(f'''Failed to read file \'{file}\' with error {type(SCREAMING_SNAKE_CASE_ )}: {e}''' )
                                raise ValueError(
                                    f'''Not able to read records in the JSON file at {file}. '''
                                    f'''You should probably indicate the field of the JSON file containing your records. '''
                                    f'''This JSON file contain the following fields: {str(list(dataset.keys() ) )}. '''
                                    f'''Select the correct one and provide it as `field=\'XXX\'` to the dataset loading method. ''' ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(SCREAMING_SNAKE_CASE_ )
                        batch_idx += 1
| 97 |
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( BaseImageProcessor ):
    r"""Image processor that resizes (shortest edge scaled by 256/224),
    center-crops, rescales and normalizes images.

    NOTE(review): the original base class (`__a`) and method names were
    mangled (five methods shared one name, and every signature reused a
    single parameter name — a syntax error). The helper names below are
    restored from the ``self.resize`` / ``self.center_crop`` /
    ``self.rescale`` / ``self.normalize`` calls made by the entry point, and
    the entry point is presumably ``preprocess`` (the BaseImageProcessor
    hook) — TODO confirm.
    """

    # NOTE(review): presumably ``model_input_names`` — TODO confirm.
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        super().__init__(**kwargs )
        # Defaults: resize shortest edge to 224, center-crop to 224x224.
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize( self , image: np.ndarray , size: Dict[str, int] , resample: PILImageResampling = PILImageResampling.BICUBIC , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """Resize ``image``; a ``shortest_edge`` size is scaled by 256/224
        before computing the output size, otherwise ``height``/``width`` are
        used directly."""
        size_dict = get_size_dict(size , default_to_square=False )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size['shortest_edge'] )
            output_size = get_resize_output_image_size(image , size=shortest_edge , default_to_square=False )
            size_dict = {'height': output_size[0], 'width': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        # `resize` here is the module-level image_transforms helper.
        return resize(
            image , size=(size_dict['height'], size_dict['width']) , resample=resample , data_format=data_format , **kwargs )

    def center_crop( self , image: np.ndarray , size: Dict[str, int] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """Center-crop ``image`` to ``size['height']`` x ``size['width']``."""
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(image , size=(size['height'], size['width']) , data_format=data_format , **kwargs )

    def rescale( self , image: np.ndarray , scale: Union[int, float] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """Rescale pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image , scale=scale , data_format=data_format , **kwargs )

    def normalize( self , image: np.ndarray , mean: Union[float, List[float]] , std: Union[float, List[float]] , data_format: Optional[Union[str, ChannelDimension]] = None , **kwargs , ):
        """Normalize ``image`` with the given per-channel ``mean`` and ``std``."""
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )

    def preprocess( self , images: ImageInput , do_resize: Optional[bool] = None , size: Optional[Dict[str, int]] = None , resample: PILImageResampling = None , do_center_crop: Optional[bool] = None , crop_size: Optional[Dict[str, int]] = None , do_rescale: Optional[bool] = None , rescale_factor: Optional[float] = None , do_normalize: Optional[bool] = None , image_mean: Optional[Union[float, Iterable[float]]] = None , image_std: Optional[Union[float, Iterable[float]]] = None , return_tensors: Optional[TensorType] = None , data_format: ChannelDimension = ChannelDimension.FIRST , **kwargs , ):
        """Run the configured pipeline (resize -> crop -> rescale -> normalize)
        over one image or a batch; per-call arguments override the instance
        defaults. Returns a BatchFeature with key ``pixel_values``."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name='crop_size' )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image , size , resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image , crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image , rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image , image_mean , image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 75 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( SegformerImageProcessor ):
    """Deprecated alias of SegformerImageProcessor.

    Emits a deprecation warning and otherwise behaves exactly like the image
    processor. Fixes: the base class was the undefined name `__magic_name__`
    (SegformerImageProcessor is imported above), `*args`/`**kwargs` shared
    one mangled name (a syntax error), and the warning category argument was
    the args tuple instead of FutureWarning.
    """

    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use SegformerImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 98 |
'''simple docstring'''
import math
def a__ ( ) -> None:
    """Interactive CLI for the columnar transposition cipher: read a message,
    a key and a mode, then print the encrypted/decrypted text.

    NOTE(review): all three functions in this module were mangled to the
    name ``a__`` (each shadowing the previous), so the ``encrypt_message`` /
    ``decrypt_message`` calls below are unresolved as the file stands —
    presumably the two helpers defined after this one. TODO restore names.
    """
    # Fix: local assignment targets were mangled, leaving `message`, `key`,
    # `mode` and `text` undefined where they are read.
    message = input('Enter message: ' )
    key = int(input(f"""Enter key [2-{len(message ) - 1}]: """ ) )
    mode = input('Encryption/Decryption [e/d]: ' )
    if mode.lower().startswith('e' ):
        text = encrypt_message(key , message )
    elif mode.lower().startswith('d' ):
        text = decrypt_message(key , message )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""" )
def a__ ( key , message ) -> str:
    """Encrypt ``message`` with a columnar transposition cipher using ``key``
    columns: column i collects every key-th character starting at offset i.
    """
    # Fix: both parameters originally shared one mangled name (a syntax
    # error); the body reads `key`, `message`, `cipher_text` and `pointer`.
    cipher_text = [''] * key
    for col in range(key ):
        pointer = col
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def a__ ( key , message ) -> str:
    """Decrypt a columnar transposition cipher produced with the same ``key``.

    Reconstructs the plaintext grid row by row, resetting early on the last
    column for the "shaded" (unused) cells of a non-full grid.
    """
    # Fix: both parameters originally shared one mangled name (a syntax
    # error); the body reads `num_cols`, `num_rows`, `num_shaded_boxes`,
    # `plain_text`, `col` and `row`.
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `main` is not defined in this module — the three mangled
    # functions above all share the name `a__`, so the original entry point
    # (presumably the interactive CLI) is unreachable. TODO restore names.
    main()
| 75 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
# Fix: all three module constants were bound to one mangled name; the
# functions below read `logger` and `name_width` (f-string field widths), and
# presumably `quantizer_width` elsewhere — restored accordingly.
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
quantizer_width = 70  # max width of quantizer names
def a (parser ):
    """Register the quant_trainer command-line options on ``parser``.

    Fixes: the parameter and the argument-group target were mangled (the body
    reads `parser` and `group`), and the `type=`/`default=` values for the
    numeric options were mangled placeholders (restored to int/float/None).
    """
    group = parser.add_argument_group("""quant_trainer arguments""" )
    group.add_argument("""--wprec""" , type=int , default=8 , help="""weight precision""" )
    group.add_argument("""--aprec""" , type=int , default=8 , help="""activation precision""" )
    group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
    group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
    group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
    group.add_argument("""--quant-disable-keyword""" , type=str , nargs="""+""" , help="""disable quantizers by keyword""" )
    group.add_argument("""--quant-disable-layer-module""" , type=str , help="""disable quantizers by keyword under layer.""" )
    group.add_argument("""--quant-enable-layer-module""" , type=str , help="""enable quantizers by keyword under layer""" )
    group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
    group.add_argument("""--percentile""" , default=None , type=float , help="""percentile for PercentileCalibrator""" )
    group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
    group.add_argument("""--clip-gelu""" , metavar="""N""" , type=float , help="""clip gelu output maximum value to N""" )
    group.add_argument(
        """--recalibrate-weights""" , action="""store_true""" , help=(
            """recalibrate weight amaxes by taking the max of the weights."""
            """ amaxes will be computed with the current quantization granularity (axis)."""
        ) , )
def a (args ):
    """Install default input/weight quantizer descriptors from ``args``.

    Maps the requested calibrator name to a pytorch_quantization calibration
    method ("max" or "histogram") and sets the default QuantDescriptors on
    QuantLinear. Fixes: the parameter and the `calib_method`/descriptor
    assignment targets were mangled, leaving them undefined where read.

    Raises:
        ValueError: for an unknown calibrator, or "percentile" without
            --percentile.
    """
    if args.calibrator == "max":
        calib_method = """max"""
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("""Specify --percentile when using percentile calibrator""" )
        calib_method = """histogram"""
    elif args.calibrator == "mse":
        calib_method = """histogram"""
    else:
        raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
    input_desc = QuantDescriptor(num_bits=args.aprec , calib_method=calib_method )
    weight_desc = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc )
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc )
def a (model , args , calib=False , eval=False ):
    """Configure ``model``'s quantizers according to ``args``.

    Applies enable/disable keyword rules, optional weight recalibration, QKV
    scale fusing and GELU clipping, then prints a quantization summary.
    Fixes: all four parameters originally shared one mangled name (a syntax
    error); the `_disabled=` arguments were mangled placeholders (restored to
    True for disables, False for enables).

    NOTE(review): `eval` (kept for signature fidelity) shadows the builtin
    and is unused in this visible body; the nesting of the clip_gelu step
    relative to `if not calib:` was ambiguous in the mangled source — TODO
    confirm.
    """
    logger.info("""Configuring Model for Quantization""" )
    logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model , ["""embeddings"""] , which="""weight""" , _disabled=True )
        if args.quant_disable:
            set_quantizer_by_name(model , [""""""] , _disabled=True )
        if args.quant_disable_keyword:
            set_quantizer_by_name(model , args.quant_disable_keyword , _disabled=True )
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=True )
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model , [r"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=False )
        if args.recalibrate_weights:
            recalibrate_weights(model )
        if args.fuse_qkv:
            fuse_qkv(model , args )
    if args.clip_gelu:
        clip_gelu(model , args.clip_gelu )
    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model )
def a (model ):
    """Switch every ``*_quantizer`` submodule of ``model`` into calibration
    mode (quantization off, calibration on); quantizers without a calibrator
    are disabled entirely. Fix: the parameter was mangled while the body
    reads `model`.
    """
    logger.info("""Enabling Calibration""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'''{name:80}: {module}''' )
def a (model , args ):
    """Load the calibrated amax values into every ``*_quantizer`` submodule
    and switch back to quantization mode; quantizers without a calibrator are
    re-enabled. Moves the model to CUDA and prints a summary. Fix: both
    parameters originally shared one mangled name (a syntax error).
    """
    logger.info("""Loading calibrated amax""" )
    for name, module in model.named_modules():
        if name.endswith("""_quantizer""" ):
            if module._calibrator is not None:
                if isinstance(module._calibrator , calib.MaxCalibrator ):
                    module.load_calib_amax()
                else:
                    # Histogram-based calibrators need the percentile argument.
                    module.load_calib_amax("""percentile""" , percentile=args.percentile )
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model )
def a (model , args ):
    """Use one shared amax (the max of q/k/v) for the QKV input quantizers of
    every self-attention block, and for the weight quantizers when
    per-tensor scaling is enabled. Fixes: the outer and inner parameters were
    mangled to duplicate names (a syntax error), and the `q`/`k`/`v`/`amax`
    assignment targets were mangled placeholders.
    """
    def fusea(qq , qk , qv ):
        # Shares the largest of the three amax values across all three quantizers.
        for mod in [qq, qk, qv]:
            if not hasattr(mod , """_amax""" ):
                print(""" WARNING: NO AMAX BUFFER""" )
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q , k , v )
        qq._amax.fill_(amax )
        qk._amax.fill_(amax )
        qv._amax.fill_(amax )
        logger.info(f'''          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
    for name, mod in model.named_modules():
        if name.endswith(""".attention.self""" ):
            logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
            fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
            if args.quant_per_tensor:
                fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def clip_gelu(model, maxval):
    """Clamp the input-quantizer amax of every non-attention `.output.dense`
    module (the projection after GELU) to `maxval`."""
    for name, mod in model.named_modules():
        if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def expand_amax(model):
    """Expand each per-channel weight quantizer's amax into a vector with one
    entry per output channel (`weight.shape[0]`).

    Bug fix: the expanded tensor was computed but never written back to the
    quantizer; it is now assigned to `mod._weight_quantizer._amax`.
    """
    for name, mod in model.named_modules():
        if hasattr(mod, """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device ) * amax
            print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def recalibrate_weights(model):
    """Recompute every weight quantizer's amax directly from the current weights.

    Fixes: the recomputed amax was discarded instead of being assigned back,
    `keepdims` lost its `True` value, and the warning message was missing its
    f-string prefix.
    """
    for name, mod in model.named_modules():
        if hasattr(mod, """_weight_quantizer""" ):
            if not hasattr(mod.weight_quantizer, """_amax""" ):
                print(f"""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
            reduce_axis = set(range(len(mod.weight.size() ) ) ) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True ).detach()
            logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Log a one-line quantization summary per weighted module.

    Args:
        model: model to inspect.
        name_width: initial layer-name column width (recomputed from the model).
        line_width: lines longer than this are split over two log lines.
        ignore: a type, a substring, or a list of either; matching modules are skipped.
    """
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list ):
        ignore = [ignore]

    # Widen the name column to the longest weighted-module name.
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, """weight""" ):
            continue
        name_width = max(name_width, len(name ) )

    for name, mod in model.named_modules():
        input_q = getattr(mod, """_input_quantizer""", None )
        weight_q = getattr(mod, """_weight_quantizer""", None )
        if not hasattr(mod, """weight""" ):
            continue
        if type(mod ) in ignore:
            continue
        if [True for s in ignore if type(s ) is str and s in name]:
            continue
        act_str = f'''Act:{input_q.extra_repr()}'''
        wgt_str = f'''Wgt:{weight_q.extra_repr()}'''
        s = f'''{name:{name_width}} {act_str} {wgt_str}'''
        if len(s ) <= line_width:
            logger.info(s )
        else:
            logger.info(f'''{name:{name_width}} {act_str}''' )
            logger.info(f'''{' ':{name_width}} {wgt_str}''' )
def print_quant_summary(model):
    """Print every TensorQuantizer in `model` and a final count.

    Fix: the `isinstance` check previously tested the model itself instead of
    each submodule.
    """
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer ):
            print(f'''{name:80} {mod}''' )
            count += 1
    print(f'''{count} TensorQuantizers found in model''' )
def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute `k` to `v` on `mod`'s quantizer submodule.

    Args:
        name: module name (used only for the warning message).
        mod: module expected to expose the quantizer attribute.
        quantizer: attribute name, e.g. "_input_quantizer" or "_weight_quantizer".
        k, v: attribute/value pair to set on the quantizer.
    """
    quantizer_mod = getattr(mod, quantizer, None )
    if quantizer_mod is not None:
        # Only existing attributes may be overridden.
        assert hasattr(quantizer_mod, k )
        setattr(quantizer_mod, k, v )
    else:
        logger.warning(f'''{name} has no {quantizer}''' )
def set_quantizers(name, mod, which="both", **kwargs):
    """Apply every `k=v` in kwargs to `mod`'s input and/or weight quantizer.

    Args:
        name: module name, used for logging.
        mod: module owning the quantizers.
        which: "input", "weight" or "both".
    """
    s = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
    for k, v in kwargs.items():
        s += f''' {k}={v}'''
        if which in ["input", "both"]:
            set_quantizer(name, mod, """_input_quantizer""", k, v )
        if which in ["weight", "both"]:
            set_quantizer(name, mod, """_weight_quantizer""", k, v )
    logger.info(s )
def set_quantizer_by_name(model, names, **kwargs):
    """Apply kwargs to every quantizer whose (owner) module name matches one of
    the regex patterns in `names`.

    Modules exposing `_input_quantizer`/`_weight_quantizer` are updated via
    `set_quantizers`; bare `*_quantizer` modules have the attributes set directly.
    """
    for name, mod in model.named_modules():
        if hasattr(mod, """_input_quantizer""" ) or hasattr(mod, """_weight_quantizer""" ):
            for n in names:
                if re.search(n, name ):
                    set_quantizers(name, mod, **kwargs )
        elif name.endswith("""_quantizer""" ):
            for n in names:
                if re.search(n, name ):
                    s = f'''Warning: changing {name:{name_width}}'''
                    for k, v in kwargs.items():
                        s += f''' {k}={v}'''
                        setattr(mod, k, v )
                    logger.info(s )
| 99 |
'''simple docstring'''
class Node:
    """A named value used as a min-heap element (renamed so the demo script's
    `Node(...)` calls resolve)."""

    def __init__(self, name, val):
        self.name = name  # display label; also used as the heap_dict key
        self.val = val  # ordering key

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        # Fix: the parameter was previously unused while the body read an
        # undefined name `other`.
        return self.val < other.val
class MinHeap:
    """Array-backed min-heap of Node-like objects (renamed so the demo script's
    `MinHeap(...)` call resolves).

    Maintains two auxiliary maps:
      * idx_of_element: node -> current index in the heap array
      * heap_dict: node.name -> node.val (lookup via ``heap[name]``)

    All method names and swap assignments were restored; the obfuscated version
    bound every method to the same name and discarded every swap result.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify `array` in place (bottom-up) and register both index maps."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move self.heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower `node`'s value (must be strictly smaller) and restore heap order."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# Demo nodes; the variable names are restored so the MinHeap call and the
# decrease_key below resolve (all five were previously bound to one name).
r = Node('''R''', -1)
b = Node('''B''', 6)
a = Node('''A''', 3)
x = Node('''X''', 1)
e = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
    print(i)

print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 | 0 |
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __snake_case(unittest.TestCase):
    """Tests for the Flax logits processors/warpers.

    All test methods were restored to distinct names (they previously shared
    one obfuscated name and shadowed each other), and the duplicate-parameter
    helper signature — a SyntaxError — was fixed.
    """

    def _get_uniform_logits(self, batch_size, length):
        # Uniform distribution over `length` tokens for `batch_size` rows.
        scores = jnp.ones((batch_size, length)) / length
        return scores

    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20

        scores = self._get_uniform_logits(batch_size=2, length=length)

        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch

        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)

        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)

        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)

        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1E-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1E-3))

        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())

        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())

    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size

        top_k_warp = FlaxTopKLogitsWarper(3)

        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)

        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])

        # check special case
        length = 5

        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)

        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])

    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2

        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))

        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))

        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1E-3))

        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )

        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0

        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)

        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])

    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0

        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)

        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float('''inf''')])

        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())

    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0

        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)

        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id shold be zero

        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5

        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero

        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())

    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)

        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())

    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15

        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()

        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()

        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)

        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)

        cur_len = 10

        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores

        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores

        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)

        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)

        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1E-3))

        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
| 100 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Restored constant names: the functions below reference CONFIG_MAPPING,
# _re_checkpoint and the ignore set, but all five values were bound to a
# single obfuscated name.
PATH_TO_TRANSFORMERS = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

# Config classes whose docstrings legitimately carry no checkpoint reference.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    '''DecisionTransformerConfig''',
    '''EncoderDecoderConfig''',
    '''MusicgenConfig''',
    '''RagConfig''',
    '''SpeechEncoderDecoderConfig''',
    '''TimmBackboneConfig''',
    '''VisionEncoderDecoderConfig''',
    '''VisionTextDualEncoderConfig''',
    '''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class) -> str | None:
    """Return the first checkpoint name whose docstring link matches its name,
    or None if no valid checkpoint reference exists.

    Renamed from the obfuscated `a__` — the checker function below calls it by
    this name.
    """
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/'''):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise ValueError if any non-deprecated config class docstring lacks a
    valid checkpoint link (renamed so the `__main__` call below resolves)."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 75 | 0 |
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# (old prefix, new prefix) pairs applied in order to every state-dict key;
# referenced by get_new_dict's default argument below.
rename_keys_prefix = [
    ('bert.bert', 'visual_bert'),
    ('bert.cls', 'cls'),
    ('bert.classifier', 'cls'),
    ('token_type_embeddings_visual', 'visual_token_type_embeddings'),
    ('position_embeddings_visual', 'visual_position_embeddings'),
    ('projection', 'visual_projection'),
]

# Checkpoint filenames the converter accepts (asserted in the convert function).
ACCEPTABLE_CHECKPOINTS = [
    'nlvr2_coco_pre_trained.th',
    'nlvr2_fine_tuned.th',
    'nlvr2_pre_trained.th',
    'vcr_coco_pre_train.th',
    'vcr_fine_tune.th',
    'vcr_pre_train.th',
    'vqa_coco_pre_trained.th',
    'vqa_fine_tuned.th',
    'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    """Load a torch checkpoint's state dict onto CPU.

    Renamed from the obfuscated `a__` — the convert function calls it by this name.
    """
    sd = torch.load(checkpoint_path, map_location='cpu' )
    return sd
def get_new_dict(d, config, rename_pairs=None):
    """Translate an original VisualBERT state dict into the HF naming scheme.

    Args:
        d: original state dict.
        config: VisualBertConfig; only `max_position_embeddings` is read.
        rename_pairs: (old, new) prefix pairs; defaults to the module-level
            `rename_keys_prefix` (resolved lazily so import order is safe).

    Fix: the obfuscated signature repeated one parameter name (a SyntaxError)
    and discarded the renamed-key assignments.
    """
    if rename_pairs is None:
        rename_pairs = rename_keys_prefix
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_pairs:
            new_key = new_key.replace(name_pair[0], name_pair[1] )
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d['cls.predictions.bias']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """
    Copy/paste/tweak an original VisualBERT checkpoint into the HF structure
    and save it to `pytorch_dump_folder_path`.
    """
    assert (
        checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'''The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'''

    # Get Config
    if "pre" in checkpoint_path:
        model_type = 'pretraining'
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 5_1_2}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8}
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8}
        elif "nlvr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 1_0_2_4}
        else:
            raise NotImplementedError(F'''No implementation found for `{checkpoint_path}`.''' )
    else:
        if "vcr" in checkpoint_path:
            config_params = {'visual_embedding_dim': 5_1_2}
            model_type = 'multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8}
            model_type = 'vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params = {'visual_embedding_dim': 2_0_4_8, 'num_labels': 3_1_2_9}
            model_type = 'vqa'
        elif "nlvr" in checkpoint_path:
            config_params = {
                'visual_embedding_dim': 1_0_2_4,
                'num_labels': 2,
            }
            model_type = 'nlvr'

    config = VisualBertConfig(**config_params )

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path )
    new_state_dict = get_new_dict(state_dict, config )

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config )

    model.load_state_dict(new_state_dict )
    # Save Checkpoints (exist_ok=True so re-running over the same folder works)
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Restored `parser`/`args` bindings: both results were previously assigned
    # to one throwaway name, so the add_argument/parse calls could not resolve.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 101 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_(metaclass=DummyObject):
    """Placeholder object that raises a clear error when the `torch` and
    `torchsde` backends are unavailable.

    Fix: the metaclass referenced an undefined obfuscated name; `DummyObject`
    is the class imported above for exactly this purpose. The backend list is
    bound to `_backends`, the attribute the dummy-object machinery reads
    — TODO confirm against the surrounding package's other dummy classes.
    """

    _backends = ['torch', 'torchsde']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''torchsde'''] )

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''torchsde'''] )

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''torchsde'''] )
| 75 | 0 |
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Return the total number of elements in `model`'s parameters that require
    gradients.

    Fixes: the lambda's parameter did not match the `p` it tested, and the
    function parameter was unused while the body read an undefined `model`.
    Renamed so the callback class below can call it.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
# Module logger; restored the `logger` name the callbacks in this file use.
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that saves the best model by validation `metric`.

    Raises:
        NotImplementedError: for metrics other than rouge2/bleu/em/loss.
    """
    if metric == "rouge2":
        exp = """{val_avg_rouge2:.4f}-{step_count}"""
    elif metric == "bleu":
        exp = """{val_avg_bleu:.4f}-{step_count}"""
    elif metric == "em":
        exp = """{val_avg_em:.4f}-{step_count}"""
    elif metric == "loss":
        exp = """{val_avg_loss:.4f}-{step_count}"""
    else:
        raise NotImplementedError(
            f"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            """ function.""" )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"""val_{metric}""", mode="""max""", save_top_k=1, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback on `val_{metric}` (minimized for loss
    metrics, maximized otherwise).

    Fix: the obfuscated signature repeated one parameter name (a SyntaxError).
    """
    return EarlyStopping(
        monitor=f"""val_{metric}""", mode="""min""" if """loss""" in metric else """max""", patience=patience, verbose=True, )
class lowercase__(pl.Callback):
    """Lightning callback that logs learning rates, metric values and (for the
    test stage) generated texts.

    Hook names restored: all methods previously shared one obfuscated name, so
    Lightning would never have invoked them.
    """

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of each optimizer param group.
        lrs = {f"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / """test_results.txt"""
            generations_file = od / """test_generations.txt"""
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
            results_file.parent.mkdir(exist_ok=True )
            generations_file.parent.mkdir(exist_ok=True )
        with open(results_file, """a+""" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor ):
                    val = val.item()
                msg = f"""{key}: {val:.6f}\n"""
                writer.write(msg )

        if not save_generations:
            return

        if "preds" in metrics:
            content = """\n""".join(metrics["""preds"""] )
            generations_file.open("""w+""" ).write(content )

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path )
        return self._write_logs(trainer, pl_module, """test""" )

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 102 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Both module constants were bound to the same name, so the logger was clobbered
# by the archive map; give each its conventional name.
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class lowerCamelCase_ ( __a ):
    """Configuration class for the CTRL model.

    The three class-level attributes previously shared one name, so only the last
    assignment survived; HF's PretrainedConfig machinery reads `model_type`,
    `keys_to_ignore_at_inference` and `attribute_map`, restored here. The original
    `__init__` repeated the parameter name `_A` (a SyntaxError) while the body read
    the real hyperparameter names — parameters are restored in the same positional
    order with the same defaults.
    """

    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        """Build a CTRL config; see HF docs for parameter semantics (dff = inner FFN dim)."""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 75 | 0 |
"""simple docstring"""
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 103 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'pytorch',
            'script': 'run_ddp.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf_dist.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
        },
    ] )
class lowerCamelCase_ ( unittest.TestCase ):
    """Multi-node data-parallel SageMaker integration tests.

    The previous version bound three different methods to one name (`lowercase_`)
    and referenced the undefined `_A` inside bodies; real method/parameter names
    are restored so unittest/parameterized can actually discover and run them.
    """

    def setUp(self):
        """Copy the training script into the SageMaker test path (pytorch only)."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for `instance_count` nodes of self.instance_type."""
        job_name = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings (native DDP script manages its own distribution)
        distribution = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=job_name , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )

    def save_results_as_csv(self, job_name):
        """Export the CloudWatch training metrics of `job_name` to a csv in the test path."""
        TrainingJobAnalytics(job_name).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(2,)] )
    def test_script(self, instance_count):
        """Train on `instance_count` nodes and check runtime/accuracy/loss KPIs."""
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , outfile )
| 75 | 0 |
"""simple docstring"""
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class UpperCamelCase__ ( _lowerCAmelCase , unittest.TestCase ):
    """Test suite for WavaVecaPhonemeCTCTokenizer (phoneme-level CTC decoding).

    NOTE(review): this file has been machine-renamed. Every method below is named
    `snake_case__`, so at class-creation time each later def shadows the previous
    one, and several signatures repeat the parameter name SCREAMING_SNAKE_CASE__,
    which is a SyntaxError in Python. The code is kept byte-identical here; only
    comments/docstrings were added. The original method names need to be restored
    for the tests to be discoverable and runnable.
    """
    A__ : Any = WavaVecaPhonemeCTCTokenizer
    A__ : Dict = False
    # setUp: writes a minimal phoneme vocabulary (json) into self.tmpdirname.
    def snake_case__ ( self ) -> List[str]:
        super().setUp()
        A__ = (
            "<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː "
            "ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː "
            "ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 "
            "oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ "
            "pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ "
            "yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ "
            "əʊ S ɡʲ onɡ2 u\" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ "
            "ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ "
            "ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ "
            "uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ "
            "ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ "
            "ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ "
            "ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4"
        ).split(" " )
        # NOTE(review): the next line zips an undefined name — the vocab list above
        # was bound to A__, so SCREAMING_SNAKE_CASE__ is unresolved here.
        A__ = dict(zip(SCREAMING_SNAKE_CASE__ , range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
        A__ = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}
        A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__ ) + "\n" )
    # Builds (text, ids) pairs of a bounded length for round-trip tests.
    # NOTE(review): the repeated parameter name SCREAMING_SNAKE_CASE__ in this
    # signature is a SyntaxError (duplicate argument in function definition).
    def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=20 , SCREAMING_SNAKE_CASE__=5 ) -> Tuple[str, list]:
        A__ = [(i, tokenizer.decode([i] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )) for i in range(len(SCREAMING_SNAKE_CASE__ ) )]
        A__ = list(filter(lambda SCREAMING_SNAKE_CASE__ : [t[0]] == tokenizer.encode(t[1] , do_phonemize=SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) )
        if max_length is not None and len(SCREAMING_SNAKE_CASE__ ) > max_length:
            A__ = toks[:max_length]
        if min_length is not None and len(SCREAMING_SNAKE_CASE__ ) < min_length and len(SCREAMING_SNAKE_CASE__ ) > 0:
            while len(SCREAMING_SNAKE_CASE__ ) < min_length:
                A__ = toks + toks
        # toks_str = [t[1] for t in toks]
        A__ = [t[0] for t in toks]
        # Ensure consistency
        A__ = tokenizer.decode(SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
        if " " not in output_txt and len(SCREAMING_SNAKE_CASE__ ) > 1:
            A__ = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
                + " "
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
            )
        if with_prefix_space:
            A__ = " " + output_txt
        A__ = tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ )
        return output_txt, output_ids
    # Tokenizer factory: loads from tmpdirname with the special-token map applied.
    def snake_case__ ( self , **SCREAMING_SNAKE_CASE__ ) -> Any:
        kwargs.update(self.special_tokens_map )
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE__ )
    # Adding regular tokens should append them after the existing vocab.
    def snake_case__ ( self ) -> Any:
        A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        # check adding a single token
        tokenizer.add_tokens("xxx" )
        A__ = tokenizer("m xxx ɪ" , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , [13, 392, 17] )  # xxx should be last token
        tokenizer.add_tokens(["aaa", "bbb", "ccc"] )
        A__ = tokenizer("m aaa ɪ ccc" , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , [13, 393, 17, 395] )  # aaa and ccc should be after xxx and 2 after aaa
        A__ = tokenizer("maɪ c" , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , [3, 200] )  # mai should be <unk> (=3)
    # phonemize() should produce espeak en-us phonemes for plain text.
    def snake_case__ ( self ) -> int:
        A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        A__ = "Hello how are you"
        A__ = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , "h ə l oʊ h aʊ ɑːɹ j uː" )
    # Encoding raw text must equal encoding the pre-phonemized string.
    def snake_case__ ( self ) -> List[Any]:
        A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        A__ = "Hello how are you"
        A__ = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE__ , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids )
    # decode(encode(text)) should round-trip back to the phoneme string.
    def snake_case__ ( self ) -> List[Any]:
        A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        A__ = "Hello how are you"
        A__ = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" )
        A__ = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # CTC decode: pad tokens and repeats must be collapsed; batch matches single.
    def snake_case__ ( self ) -> List[str]:
        A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        A__ = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        A__ = tokenizer.decode(sample_ids[0] )
        A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , batch_tokens[0] )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
    # With a word delimiter token, phonemize() inserts "|" between words.
    def snake_case__ ( self ) -> str:
        A__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        A__ = "Hello how are you"
        A__ = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , "h ə l oʊ | h aʊ | ɑːɹ | j uː |" )
    # Raw vs pre-phonemized encoding consistency with a word delimiter.
    def snake_case__ ( self ) -> Union[str, Any]:
        A__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        A__ = "Hello how are you"
        A__ = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" )
        self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , tokenizer(SCREAMING_SNAKE_CASE__ , do_phonemize=SCREAMING_SNAKE_CASE__ ).input_ids )
    # Word delimiter filtering behavior of decode/batch_decode.
    def snake_case__ ( self ) -> str:
        A__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        A__ = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on
        # decode with word_del_token filter
        A__ = tokenizer.decode(sample_ids[0] )
        A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , batch_tokens[0] )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"] )
        # decode with no word_del_token filter
        A__ = tokenizer.decode(sample_ids[0] , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , batch_tokens[0] )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"] )
    # Round trip with delimiter filtering enabled.
    def snake_case__ ( self ) -> Dict:
        A__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        A__ = "Hello how are you"
        A__ = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" )
        A__ = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Round trip with delimiter filtering disabled (delimiters stripped manually).
    def snake_case__ ( self ) -> Union[str, Any]:
        A__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        A__ = "Hello how are you"
        A__ = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" )
        A__ = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |" )] ).strip() , SCREAMING_SNAKE_CASE__ )
    # Tokenizer constructed without a word delimiter token.
    def snake_case__ ( self ) -> Any:
        A__ = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft" , word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        A__ = "Hello how are you"
        A__ = tokenizer(SCREAMING_SNAKE_CASE__ , phonemizer_lang="en-us" ).input_ids
        A__ = tokenizer(SCREAMING_SNAKE_CASE__ , phonemizer_lang="fr-fr" ).input_ids
        self.assertNotEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        A__ = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
        A__ = tokenizer.decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , "h ə l oʊ h aʊ ɑːɹ j uː" )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , "ɛ l o h aʊ a ʁ j u" )
    # Case should not affect encoding (tokenizer lower-cases internally).
    def snake_case__ ( self ) -> int:
        A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        A__ = "Hello how Are you"
        A__ = "hello how are you"
        A__ = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
        A__ = tokenizer(SCREAMING_SNAKE_CASE__ ).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Added tokens and special tokens survive CTC batch decoding.
    def snake_case__ ( self ) -> List[str]:
        A__ = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft" )
        tokenizer.add_tokens(["!", "?"] )
        tokenizer.add_special_tokens({"cls_token": "$$$"} )
        # fmt: off
        A__ = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on
        A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
        self.assertEqual(SCREAMING_SNAKE_CASE__ , ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"] )
    # Helper: extract one field from a list of offset dicts.
    # NOTE(review): duplicate parameter name — SyntaxError as written.
    @staticmethod
    def snake_case__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Dict:
        A__ = [d[key] for d in offsets]
        return retrieved_list
    # Character offsets returned by decode(output_char_offsets=True).
    def snake_case__ ( self ) -> Union[str, Any]:
        A__ = self.get_tokenizer(word_delimiter_token="|" )
        tokenizer.add_tokens("|" )
        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        A__ = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on
        A__ = tokenizer.decode(SCREAMING_SNAKE_CASE__ , output_char_offsets=SCREAMING_SNAKE_CASE__ , filter_word_delimiter_token=SCREAMING_SNAKE_CASE__ )
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys() ) , 2 )
        self.assertTrue("text" in outputs )
        self.assertTrue("char_offsets" in outputs )
        self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"] , "char" ) ) , outputs.text )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "char" ) , ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"] )
        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "start_offset" ) , [0, 1, 4, 7, 9, 11, 12, 15, 16] )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"] , "end_offset" ) , [1, 4, 6, 9, 10, 12, 15, 16, 17] )
    # batch_decode with offsets must agree with per-sample decode.
    def snake_case__ ( self ) -> List[Any]:
        A__ = self.get_tokenizer(word_delimiter_token="|" )
        # NOTE(review): the two inner helpers below also repeat the parameter
        # name SCREAMING_SNAKE_CASE__ (SyntaxError as written).
        def check_list_tuples_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
            self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
            self.assertTrue(isinstance(outputs_list[0] , SCREAMING_SNAKE_CASE__ ) )
            # transform list to ModelOutput
            A__ = WavaVecaPhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]} )
            self.assertListEqual(outputs_batch["text"] , outputs_batch_a["text"] )
            def recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
                    [recursive_check(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for la, la in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"] , outputs_batch_a["char_offsets"] )
        # fmt: off
        A__ = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on
        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`
        # char
        A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , output_char_offsets=SCREAMING_SNAKE_CASE__ )
        A__ = [tokenizer.decode(SCREAMING_SNAKE_CASE__ , output_char_offsets=SCREAMING_SNAKE_CASE__ ) for ids in sample_ids]
        check_list_tuples_equal(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes" )
    def snake_case__ ( self ) -> Union[str, Any]:
        pass
    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes" )
    def snake_case__ ( self ) -> Optional[Any]:
        pass
    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency" )
    def snake_case__ ( self ) -> List[str]:
        pass
    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing" )
    def snake_case__ ( self ) -> Any:
        pass
    # add_tokens / add_special_tokens should grow the vocab and produce new ids.
    def snake_case__ ( self ) -> List[Any]:
        A__ = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                A__ = tokenizer.vocab_size
                A__ = len(SCREAMING_SNAKE_CASE__ )
                self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 )
                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)
                A__ = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                A__ = tokenizer.add_tokens(SCREAMING_SNAKE_CASE__ )
                A__ = tokenizer.vocab_size
                A__ = len(SCREAMING_SNAKE_CASE__ )
                self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size + len(SCREAMING_SNAKE_CASE__ ) )
                A__ = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
                self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 4 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                A__ = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                A__ = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE__ )
                A__ = tokenizer.vocab_size
                A__ = len(SCREAMING_SNAKE_CASE__ )
                self.assertNotEqual(SCREAMING_SNAKE_CASE__ , 0 )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
                self.assertEqual(SCREAMING_SNAKE_CASE__ , all_size_a + len(SCREAMING_SNAKE_CASE__ ) )
                A__ = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=SCREAMING_SNAKE_CASE__ )
                self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__ ) , 6 )
                self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[0] , tokens[1] )
                self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
                self.assertGreater(tokens[-3] , tokens[-4] )
                self.assertEqual(tokens[0] , tokenizer.eos_token_id )
                self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def snake_case__ ( self ) -> str:
        pass
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode." )
    def snake_case__ ( self ) -> List[Any]:
        pass
    # convert_tokens_to_string returns a dict (with "text"), not a plain string.
    def snake_case__ ( self ) -> Optional[int]:
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        A__ = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ , do_lower_case=SCREAMING_SNAKE_CASE__ )
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
                A__ = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                A__ = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__ )
                self.assertIsInstance(output["text"] , SCREAMING_SNAKE_CASE__ )
| 104 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCamelCase_ ( datasets.Metric ):
    """Accuracy metric for the MATH dataset, canonicalizing LaTeX before comparing.

    The two methods previously shared one name so the second silently shadowed
    the first; `datasets.Metric` dispatches to `_info` and `_compute`, restored
    here. `_compute` also had duplicated `_A` parameters (a SyntaxError) and
    compared each element with itself.
    """

    def _info(self):
        """Describe the metric's inputs, citation and homepage for `datasets`."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' ),
                    '''references''': datasets.Value('''string''' ),
                } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )

    def _compute(self, predictions, references):
        """Return {"accuracy": fraction of predictions equivalent to their reference}."""
        n_correct = 0.0
        for pred, ref in zip(predictions , references ):
            # is_equiv canonicalizes LaTeX (e.g. "1/2" vs "\\frac{1}{2}") before comparing
            n_correct += 1.0 if math_equivalence.is_equiv(pred , ref ) else 0.0
        accuracy = n_correct / len(predictions )
        return {
            "accuracy": accuracy,
        }
| 75 | 0 |
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> Dict:
"""simple docstring"""
if isinstance(lowerCamelCase_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class lowerCAmelCase_ :
def snake_case ( self ,snake_case__ ,snake_case__ ):
pass
def snake_case ( self ):
pass
def snake_case ( self ):
pass
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : int = np.abs((a - b) ).max()
self.assertLessEqual(snake_case__ ,snake_case__ ,F'Difference between torch and flax is {diff} (>= {tol}).' )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : str = FlaxVisionTextDualEncoderModel(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = model(input_ids=snake_case__ ,pixel_values=snake_case__ ,attention_mask=snake_case__ )
self.assertEqual(output['text_embeds'].shape ,(input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape ,(pixel_values.shape[0], config.projection_dim) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.get_vision_text_model(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(input_ids=snake_case__ ,pixel_values=snake_case__ ,attention_mask=snake_case__ )
self.assertEqual(output['text_embeds'].shape ,(input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape ,(pixel_values.shape[0], model.config.projection_dim) )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_vision_text_model(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = model(input_ids=snake_case__ ,pixel_values=snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : int = model(input_ids=snake_case__ ,pixel_values=snake_case__ ,attention_mask=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = after_output[0]
SCREAMING_SNAKE_CASE_ : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ ,1E-3 )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__ ,snake_case__=None ,**snake_case__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_vision_text_model(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = {'vision_model': vision_model, 'text_model': text_model}
SCREAMING_SNAKE_CASE_ : int = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = model(
input_ids=snake_case__ ,pixel_values=snake_case__ ,attention_mask=snake_case__ ,output_attentions=snake_case__ )
SCREAMING_SNAKE_CASE_ : int = output.vision_model_output.attentions
self.assertEqual(len(snake_case__ ) ,vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE_ : Tuple = to_atuple(vision_model.config.image_size )
SCREAMING_SNAKE_CASE_ : int = to_atuple(vision_model.config.patch_size )
SCREAMING_SNAKE_CASE_ : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
SCREAMING_SNAKE_CASE_ : str = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
SCREAMING_SNAKE_CASE_ : str = output.text_model_output.attentions
self.assertEqual(len(snake_case__ ) ,text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
pt_model.to(snake_case__ )
pt_model.eval()
# prepare inputs
SCREAMING_SNAKE_CASE_ : int = inputs_dict
SCREAMING_SNAKE_CASE_ : Tuple = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[str] = pt_model(**snake_case__ ).to_tuple()
SCREAMING_SNAKE_CASE_ : Optional[Any] = fx_model(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) ,len(snake_case__ ) ,'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ ,pt_output.numpy() ,4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Any = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ ,from_pt=snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = fx_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) ,len(snake_case__ ) ,'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
self.assert_almost_equals(snake_case__ ,pt_output.numpy() ,4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : str = VisionTextDualEncoderModel.from_pretrained(snake_case__ ,from_flax=snake_case__ )
pt_model_loaded.to(snake_case__ )
pt_model_loaded.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Union[str, Any] = pt_model_loaded(**snake_case__ ).to_tuple()
self.assertEqual(len(snake_case__ ) ,len(snake_case__ ) ,'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
self.assert_almost_equals(snake_case__ ,pt_output_loaded.numpy() ,4E-2 )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : List[str] = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : int = VisionTextDualEncoderModel(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxVisionTextDualEncoderModel(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = fx_state
self.check_pt_flax_equivalence(snake_case__ ,snake_case__ ,snake_case__ )
def snake_case ( self ,snake_case__ ,snake_case__ ,snake_case__ ):
SCREAMING_SNAKE_CASE_ : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(snake_case__ ,snake_case__ )
SCREAMING_SNAKE_CASE_ : List[Any] = VisionTextDualEncoderModel(snake_case__ )
SCREAMING_SNAKE_CASE_ : Optional[int] = FlaxVisionTextDualEncoderModel(snake_case__ )
SCREAMING_SNAKE_CASE_ : Tuple = load_flax_weights_in_pytorch_model(snake_case__ ,fx_model.params )
self.check_pt_flax_equivalence(snake_case__ ,snake_case__ ,snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = self.prepare_config_and_inputs()
self.check_save_load(**snake_case__ )
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : int = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**snake_case__ )
@is_pt_flax_cross_test
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = config_inputs_dict.pop('vision_config' )
SCREAMING_SNAKE_CASE_ : Tuple = config_inputs_dict.pop('text_config' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = config_inputs_dict
self.check_equivalence_pt_to_flax(snake_case__ ,snake_case__ ,snake_case__ )
self.check_equivalence_flax_to_pt(snake_case__ ,snake_case__ ,snake_case__ )
@slow
def snake_case ( self ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.get_pretrained_model_and_inputs()
SCREAMING_SNAKE_CASE_ : Any = model_a(**snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_pretrained(snake_case__ )
SCREAMING_SNAKE_CASE_ : List[str] = model_a(**snake_case__ )
SCREAMING_SNAKE_CASE_ : int = after_outputs[0]
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case__ ,1E-5 )
@require_flax
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
    """ViT vision encoder + BERT text encoder variant of the shared dual-encoder tests."""

    def get_pretrained_model_and_inputs(self):
        # both tiny checkpoints only ship PyTorch weights, hence *_from_pt=True
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit',
            'hf-internal-testing/tiny-bert',
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
    """CLIP vision encoder + BERT text encoder variant of the shared dual-encoder tests."""

    def get_pretrained_model_and_inputs(self):
        # both tiny checkpoints only ship PyTorch weights, hence *_from_pt=True
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-clip',
            'hf-internal-testing/tiny-bert',
            vision_from_pt=True,
            text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()
        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    """Integration test against the public clip-italian checkpoint (slow, needs network)."""

    @slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian', logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian')
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        inputs = processor(
            text=['una foto di un gatto', 'una foto di un cane'], images=image, padding=True, return_tensors='np'
        )
        outputs = model(**inputs)
        # verify the logits: (num_images, num_texts) and its transpose
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )
        expected_logits = np.array([[1.2284727, 0.3104122]])
        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 105 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
# Module logger.
logger = logging.get_logger(__name__)

# Canonical file names for the vocabulary / merges / serialized tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

# Maximum model input sizes (positional embedding capacity) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class lowerCamelCase_ ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) BART tokenizer.

    Restores the canonical BartTokenizerFast behavior: the degraded original used the
    undefined base `__a`, collided every method on the name `lowercase_`, and decorated
    the setter with `@mask_token.setter` while no `mask_token` property existed
    (NameError at class creation).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if its add_prefix_space flag disagrees with ours.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])
            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """Mask token string, or None (with an error log) when it was never set."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # BART's mask token behaves like a normal word: it absorbs the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.'
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.'
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Serialize the underlying model files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """<s> A </s> for a single sequence; <s> A </s></s> B </s> for a pair."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None):
        """BART does not use token type ids: always a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
| 75 | 0 |
from collections.abc import Sequence
def lowerCamelCase_ ( lowerCAmelCase__ : Sequence[int] | None = None ) -> int:
'''simple docstring'''
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
A = nums[0]
for i in range(1 , len(lowerCAmelCase__ ) ):
A = nums[i]
A = max(lowerCAmelCase__ , ans + num , lowerCAmelCase__ )
return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Try on a sample input from the user: read a count, then the numbers.
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle *data* in place by repeated random pair swaps and return it.

    NOTE: this variant swaps two uniformly chosen indices len(data) times; it is
    kept as-is from the original rather than the classic decrementing-index form.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


# Backward-compatible alias for the old (degraded) name.
a__ = fisher_yates_shuffle
if __name__ == "__main__":
    # Demo: shuffle a list of ints and a list of strings.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 75 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_UpperCAmelCase : List[str] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def test_inspect_dataset(path, tmp_path):
    """inspect_dataset copies the loading script into tmp_path without cache dirs."""
    inspect_dataset(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def test_inspect_metric(path, tmp_path):
    """inspect_metric copies the metric script into tmp_path without cache dirs."""
    inspect_metric(path, tmp_path)
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    """Config info reports the requested config name and its splits."""
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    """A missing/ambiguous config name raises the documented exception."""
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def test_get_dataset_config_names(path, expected):
    """The expected config name is among those discovered for the dataset."""
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    """get_dataset_infos lists every config; the first config exposes its splits."""
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    """The expected config is present and reports the expected splits."""
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    """Asking for split names without a valid config raises the documented exception."""
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 107 |
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    """Return all primes <= n using a segmented Sieve of Eratosthenes.

    First sieves the primes up to sqrt(n), then marks composites in windows of
    width ~sqrt(n) so memory stays O(sqrt(n)).
    """
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # classic sieve over [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # sieve the rest of [sqrt(n)+1, n] one window at a time
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # first multiple of `each` inside [low, high]
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime


# Backward-compatible alias for the old (degraded) name.
a__ = sieve
print(sieve(1_0**6))
| 75 | 0 |
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """ConfigTester override checking MobileViT-specific config attributes.

    Renamed from the degraded `SCREAMING_SNAKE_CASE__` (base `UpperCAmelCase` was
    undefined) to match the existing reference in the test class's setUp.
    """

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    """Builds tiny MobileViT configs/inputs and runs forward-shape checks.

    Renamed from the degraded `SCREAMING_SNAKE_CASE__` to match the existing
    reference in the test class's setUp; `self.*` assignments are restored (the
    degraded __init__ bound everything to one local).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # feature map is downsampled by output_stride in both spatial dims
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for MobileViT.

    Restored from the degraded version: the bases (`UpperCAmelCase`) were undefined,
    every class attribute collided on `_lowerCamelCase` (so the mixin could not read
    `all_model_classes` etc.), and every method collided on `lowerCamelCase` (so
    unittest discovered nothing).
    """

    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViT does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViT does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the fixed COCO test image used by the integration tests below.

    Renamed from the degraded `_SCREAMING_SNAKE_CASE` to match the existing
    `prepare_img()` call sites; the degraded body also returned the undefined
    name `image`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests for MobileViT checkpoints.

    NOTE(review): the obfuscated original named every method ``lowerCamelCase``
    (later defs shadowed earlier ones, so ``self.default_image_processor`` could
    never resolve) and used the undefined name ``lowerCamelCase`` where a local
    or the target device was intended.  Names below are restored;
    ``torch_device`` is presumed to be imported from
    transformers.testing_utils at the top of the file — confirm.
    """

    @cached_property
    def default_image_processor(self):
        # Shared processor; None when the vision extras are not installed.
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        """Check logits of the xx-small classification head on the fixture image."""
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)

        image_processor = self.default_image_processor
        image = _SCREAMING_SNAKE_CASE()  # COCO fixture loader defined above in this file
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        """Check a logit slice of the DeepLabV3-MobileViT segmentation head."""
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = _SCREAMING_SNAKE_CASE()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        """post_process_semantic_segmentation honours target_sizes and defaults to logit size."""
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

        image = _SCREAMING_SNAKE_CASE()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        # explicit target size: segmentation map is resized to (50, 60)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        # no target size: segmentation map keeps the logits' spatial size
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
# Force deterministic torch/cuda kernels so the fixed image slices asserted
# in the tests below are reproducible across runs.
enable_full_determinism()
# NOTE(review): machine-obfuscated test code, kept byte-identical below.
# Artifacts a maintainer should be aware of:
#   * the three identical base names `__a` are one repeated, unresolved name;
#   * every method is named `lowercase_`, so later defs shadow earlier ones
#     (presumably get_dummy_components / get_dummy_inputs / test methods);
#   * the former get_dummy_inputs signature repeats the parameter name `_A`
#     twice, which is a SyntaxError in Python;
#   * `_A` is also used as an undefined *value* in several calls.
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
    # pipeline under test plus the shared parameter sets used by the mixins
    lowerCAmelCase__ = StableDiffusionInpaintPipeline
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowerCAmelCase__ = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowerCAmelCase__ = frozenset([] )
    # presumably get_dummy_components(): builds tiny unet/vae/text-encoder parts
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        torch.manual_seed(0 )
        UpperCAmelCase__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
        UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
        torch.manual_seed(0 )
        UpperCAmelCase__ : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
        UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        # NOTE(review): the dict values below reference names (unet, scheduler,
        # vae, ...) that the obfuscation replaced with `UpperCAmelCase__` above.
        UpperCAmelCase__ : str = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    # presumably get_dummy_inputs(device, seed=0); duplicate `_A` parameters
    def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
        '''simple docstring'''
        UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
        UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
        else:
            UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase__ : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    # presumably the main fast test: checks a fixed 3x3 output slice on CPU
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase__ : Tuple = self.get_dummy_components()
        UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
        UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
        UpperCAmelCase__ : Any = sd_pipe(**_A ).images
        UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    # NOTE(review): obfuscated slow-GPU integration tests for
    # StableDiffusionInpaintPipeline.  Every method is named `lowercase_`
    # (presumably tearDown plus three test_* methods) and `_A` is used as an
    # undefined value throughout.  Code kept byte-identical; comments only.
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # presumably: full-precision inpainting against a reference numpy image
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : List[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''' )
        UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : str = torch.manual_seed(0 )
        UpperCAmelCase__ : str = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : int = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3
    # presumably: same scenario in fp16 with a looser tolerance
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
        UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
            _A , torch_dtype=torch.floataa , safety_checker=_A , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : Tuple = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1
    # presumably: memory-budget test with attention slicing + cpu offload
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase__ : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
        UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
        UpperCAmelCase__ : Any = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
        UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.6_5 * 10**9
| 75 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
# NOTE(review): both module-level constants below are named `a`, so this
# logger binding is immediately shadowed by the checkpoint map that follows
# (obfuscation artifact; presumably these were `logger` and
# `BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP`).
a = logging.get_logger(__name__)
# Canonical BLOOM checkpoints -> hosted config.json locations.
a = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class __a ( _snake_case ):
    """Configuration class for BLOOM models (de-obfuscated).

    NOTE(review): the obfuscated original repeated the name ``lowerCamelCase``
    for every ``__init__`` parameter (a SyntaxError) and named all three class
    attributes ``__UpperCamelCase`` (so they shadowed each other).  Parameter
    and attribute names below are restored from the values assigned in the
    body; the base ``_snake_case`` is presumed to be PretrainedConfig —
    confirm against the file's imports.
    """

    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }

    def __init__(
        self,
        vocab_size: int = 250880,
        hidden_size: int = 64,
        n_layer: int = 2,
        n_head: int = 8,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        bos_token_id: int = 1,
        eos_token_id: int = 2,
        apply_residual_connection_post_layernorm: bool = False,
        hidden_dropout: float = 0.0,
        attention_dropout: float = 0.0,
        pretraining_tp: int = 1,
        slow_but_exact: bool = False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg: when given, it overrides
        # hidden_size.
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class __a ( _snake_case ):
    """ONNX export configuration for BLOOM (de-obfuscated).

    NOTE(review): the obfuscated original named every method/property
    ``UpperCAmelCase__`` (later defs shadowed earlier ones, so the references
    to ``self.num_layers`` / ``self.num_attention_heads`` inside
    ``generate_dummy_inputs`` could never resolve), repeated the parameter
    name ``lowerCamelCase`` in ``__init__`` (a SyntaxError), and used
    undefined names as argument values.  Names below are restored to the
    conventional OnnxConfigWithPast interface; the base ``_snake_case`` is
    presumed to be OnnxConfigWithPast — confirm against the file's imports.
    """

    # minimum torch version required by the operators this export produces
    torch_onnx_minimum_version = version.parse('1.12')

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis description of the ONNX graph inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        # tolerance used when validating the exported graph against PyTorch
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ):
        """Build ordered dummy inputs (input_ids, past_key_values, attention_mask) for export."""
        # Deliberately call the grandparent implementation: the past-aware
        # override would build past tensors in a non-BLOOM layout.
        common_inputs = super(_snake_case, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 109 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    UpperCamelCase__ = '''platform'''
    # NOTE(review): the obfuscated original bound the constant but dropped the
    # side effect the comments above describe; restored so the allocator
    # setting actually reaches XLA before jax is imported.
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = UpperCamelCase__

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )
def a__(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the kwargs dict for a Flax Blenderbot forward pass, deriving any
    mask that was not supplied.

    NOTE(review): the obfuscated original reused a single name for all eight
    parameters (a SyntaxError); the names below are restored from the values
    the body computes.
    """
    # Derive padding masks from the pad token when the caller did not pass them.
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    # Head masks default to "keep every head".
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): reuses the *encoder* mask, mirroring the source text —
        # possibly intentional in these tests (same shape); confirm upstream.
        "decoder_attention_mask": attention_mask,
    }
# NOTE(review): obfuscated Flax Blenderbot model tester.  Artifacts kept
# byte-identical below: `__init__` and the cache-check methods repeat the
# parameter name `_A` (a SyntaxError); all config-building locals were renamed
# to `UpperCAmelCase__`.  Comments only were added.
class lowerCamelCase_ :
    def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
        '''simple docstring'''
        UpperCAmelCase__ : Union[str, Any] = parent
        UpperCAmelCase__ : str = batch_size
        UpperCAmelCase__ : Dict = seq_length
        UpperCAmelCase__ : str = is_training
        UpperCAmelCase__ : int = use_labels
        UpperCAmelCase__ : Union[str, Any] = vocab_size
        UpperCAmelCase__ : Union[str, Any] = hidden_size
        UpperCAmelCase__ : int = num_hidden_layers
        UpperCAmelCase__ : Any = num_attention_heads
        UpperCAmelCase__ : List[str] = intermediate_size
        UpperCAmelCase__ : str = hidden_act
        UpperCAmelCase__ : str = hidden_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
        UpperCAmelCase__ : int = eos_token_id
        UpperCAmelCase__ : Optional[int] = pad_token_id
        UpperCAmelCase__ : List[str] = bos_token_id
        UpperCAmelCase__ : Union[str, Any] = initializer_range
    # presumably prepare_config_and_inputs(): builds config + input dict
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
        UpperCAmelCase__ : List[Any] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
        UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
        return config, inputs_dict
    def lowercase_ ( self : Union[str, Any] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
        return config, inputs_dict
    # presumably check_use_cache_forward(): cached vs uncached decode must agree
    def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = 20
        UpperCAmelCase__ : int = model_class_name(_A )
        UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
        UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        UpperCAmelCase__ : str = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase__ : str = model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase__ : Tuple = model.decode(
            decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
        UpperCAmelCase__ : int = model.decode(_A , _A )
        UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
    # presumably check_use_cache_forward_with_attn_mask(): same, with padding mask
    def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : Tuple = 20
        UpperCAmelCase__ : Optional[int] = model_class_name(_A )
        UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        UpperCAmelCase__ : Any = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase__ : int = model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase__ : Any = model.decode(
            decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
        UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    # NOTE(review): obfuscated standalone head tests.  All methods are named
    # `lowercase_` (later defs shadow earlier ones) and locals reference names
    # (input_ids, config, lm_model, ...) that the obfuscation rebound to
    # `UpperCAmelCase__`.  Kept byte-identical; comments only.
    lowerCAmelCase__ = 9_9
    # presumably _get_config_and_data(): fixed token batch + small config
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1], # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )
        UpperCAmelCase__ : int = input_ids.shape[0]
        UpperCAmelCase__ : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size
    # presumably: LM head produces (batch, seq, vocab) logits
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
        UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
        UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
        UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , _A )
    # presumably: encoder/decoder inputs give decoder-shaped logits
    def lowercase_ ( self : int ):
        '''simple docstring'''
        UpperCAmelCase__ : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
        UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
        UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , _A )
    # presumably: shift_tokens_right moves pads and inserts the start token
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
        UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum()
        UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(_A , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
    # NOTE(review): obfuscated mixin-driven model tests.  The two base names
    # `__a` are unresolved here (presumably FlaxModelTesterMixin and
    # FlaxGenerationTesterMixin); methods share the name `lowercase_`; `_A` is
    # used as an undefined value.  Kept byte-identical; comments only.
    lowerCAmelCase__ = True
    lowerCAmelCase__ = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )
    # presumably: cache-forward equivalence for every model class
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_A , _A , _A )
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )
    # presumably: jitted vs non-jitted encode produce same-shaped outputs
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
                UpperCAmelCase__ : str = model_class(_A )

                @jax.jit
                def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
                    return model.encode(input_ids=_A , attention_mask=_A )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )
    # presumably: jitted vs non-jitted decode produce same-shaped outputs
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : List[str] = model_class(_A )
                UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                UpperCAmelCase__ : Tuple = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
                    return model.decode(
                        decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )
    @slow
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase__ : Union[str, Any] = model(_A )
            self.assertIsNotNone(_A )
    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def lowercase_ ( self : Dict ):
        '''simple docstring'''
        UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
        UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        UpperCAmelCase__ : Optional[Any] = ['''Sam''']
        UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
        UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
        UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
        UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
        assert generated_txt[0].strip() == tgt_text
| 75 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """Builds a tiny FlaubertConfig plus dummy inputs and runs one check per TF Flaubert head.

    NOTE(review): the mangled original assigned every hyper-parameter to one local name,
    referenced undefined globals (`parent`, `input_ids`, ...), and named every method
    `__snake_case` (each def shadowing the previous). This restores the canonical tester:
    attributes live on ``self`` and methods carry the names the test class calls.
    """

    def __init__(self, parent):
        # `parent` is the unittest.TestCase driving the checks (assert* methods).
        self.parent = parent
        # Tiny-model hyper-parameters (values preserved from the original assignments).
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        """Return a FlaubertConfig and the dummy tensors shared by every check."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertModel(config=config)
        # exercise both dict-style and list-style call signatures
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        # duplicate every input once per choice: (batch, choices, seq)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Adapt `prepare_config_and_inputs` output to the common-test (config, inputs_dict) shape."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite for the TF Flaubert model family.

    NOTE(review): the mangled original inherited from an undefined name `lowercase`,
    named every test `__snake_case` (shadowing), and referenced undefined names
    (`UpperCamelCase_`) inside `setUp`/`test_model_from_pretrained`. Restored here.
    """

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the public `jplu/tf-flaubert-small-cased` checkpoint."""

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        # token ids for "J'aime flaubert !" (mangled original used the nonexistent tf.intaa dtype)
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 110 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based dummy dataset with a single flat string feature `content`.

    NOTE(review): restored from a mangled definition whose builder hooks were all
    named `lowercase_` (never invoked by the builder API) and whose bodies used
    undefined names (`_A`, `pipeline`). The test class instantiates `DummyBeamDataset`.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        # feed the in-memory examples straight into the Beam pipeline
        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based dummy dataset with a nested `a.b` sequence-of-string feature.

    NOTE(review): restored from a mangled definition (see DummyBeamDataset); the
    test class instantiates `NestedBeamDataset` by this name.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return (key, example) pairs for the flat dummy dataset.

    Renamed from the mangled `a__`: the builders and tests call it by this name.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return (key, example) pairs for the nested dummy dataset.

    Renamed from the mangled `a__`: the builders and tests call it by this name.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """End-to-end checks for BeamBasedBuilder with the DirectRunner.

    NOTE(review): restored from a mangled class whose base (`__a`) and every local
    (`builder`, `dset`, `expected_num_examples`, `tmp_cache_dir`) was undefined.
    """

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_to_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                # force two output shards while delegating to the real writer
                write_parquet_mock.side_effect = partial(original_write_to_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow"
                    )
                )
            )
            # the original asserted shard 00000 twice; check the second shard instead
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow"
                    )
                )
            )
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset

    @require_beam
    def test_no_beam_options(self):
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            # without a beam runner, preparation must fail loudly
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")
                )
            )
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})})
            )
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1]
            )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json"))
            )
            del dset
| 75 | 0 |
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __lowerCAmelCase(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formats Arrow rows/columns/batches as torch tensors.

    NOTE(review): restored from a mangled definition whose parameters were `_a`
    while the bodies referenced `_A`, whose methods were all named `_a` (shadowing),
    and which used the nonexistent dtypes `torch.intaa`/`torch.floataa`.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # extra kwargs forwarded to every torch.tensor(...) call
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor; pass through otherwise."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a torch tensor, choosing int64/float32 defaults."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        # user-supplied kwargs win over the inferred default dtype
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# Help text for `accelerate config` (the mangled name `UpperCamelCase__` was never read).
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    """Ask which compute environment is used and collect the matching config.

    Renamed from the mangled `a__`: `config_command` calls it by this name.
    """
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build (or attach to `subparsers`) the argument parser for `accelerate config`.

    Renamed from the mangled `a__`: `main` calls it by this name.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # used by the top-level `accelerate` CLI to dispatch to config_command
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive prompts and save the result to `args.config_file`
    (or the default cache location when no path is given).

    Renamed from the mangled `a__`: `main` and `config_command_parser` reference it.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # ensure the accelerate cache directory exists before writing the default file
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    """CLI entry point: parse arguments and run the config command.

    Renamed from the mangled `a__`: the `__main__` guard below calls `main()`.
    """
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 75 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    """Distribution transformed by `y = loc + scale * x` (defaults loc=0, scale=1).

    NOTE(review): restored from a mangled definition whose `__init__` had four
    parameters all named `__A` (a SyntaxError) and whose base class was the
    undefined name `__a`. `DistributionOutput.distribution` instantiates this
    class by the name `AffineTransformed`.
    """

    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance scales by `scale**2`; `loc` does not affect it."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    """Projects a hidden state to one tensor per distribution argument, then maps
    the unbounded outputs into each argument's valid domain via `domain_map`.

    NOTE(review): restored from a mangled definition with duplicate `__A`
    parameters and a `_snake_case` forward; `DistributionOutput.get_parameter_projection`
    instantiates this class by the name `ParameterProjection`.
    """

    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        # one linear head per distribution argument (sizes from args_dim values)
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class lowercase_ ( nn.Module ):
def __init__( self , __A ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE_ : Optional[Any] =function
def _snake_case ( self , __A , *__A ) -> List[Any]:
return self.function(_A , *_A )
class DistributionOutput:
    """Base class mapping network outputs to the arguments of a torch Distribution.

    NOTE(review): restored from a mangled definition whose class attributes were
    all named `__lowerCamelCase` and whose methods were all named `_snake_case`,
    while the bodies (and the subclasses below) call `_base_distribution`,
    `event_dim`, `squareplus`, etc. by their real names.
    """

    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim=1):
        self.dim = dim
        # scale each argument's size by the number of output dimensions
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Build the (optionally affine-transformed) output distribution."""
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self):
        """Shape of each individual event contemplated by the distribution."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self):
        """Number of event dimensions (length of `event_shape`)."""
        return len(self.event_shape)

    @property
    def value_in_support(self):
        """A value guaranteed to lie in the distribution's support (used for padding)."""
        return 0.0

    def get_parameter_projection(self, in_features):
        """Return the module that maps features to distribution arguments."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args):
        """Map unbounded projections into each argument's valid domain (subclass hook)."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x):
        # smooth positivity map: behaves like ReLU for large |x| but is differentiable at 0
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    """DistributionOutput for a Student-T parameterised by (df, loc, scale).

    NOTE(review): restored from a mangled definition whose base was the undefined
    name `__a` and whose class attributes were unreadable as `args_dim` /
    `distribution_class` (which the base class requires).
    """

    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        # scale must be strictly positive; df must exceed 2 so variance exists
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    """DistributionOutput for a Normal parameterised by (loc, scale).

    NOTE(review): restored from a mangled definition (undefined base `__a`,
    unreadable class attributes) — see StudentTOutput.
    """

    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        # scale must be strictly positive
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    """DistributionOutput for a NegativeBinomial parameterised by (total_count, logits).

    NOTE(review): restored from a mangled definition (undefined base `__a`,
    bodies referencing undefined `_A`/`distr_args`) — see StudentTOutput.
    """

    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args):
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None):
        """Scaling is folded into the logits rather than applied as an affine transform."""
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
| 443 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch model + config on disk.

    Renamed from the mangled `a__` to match the existing call site in the
    `__main__` block (`convert_gpta_checkpoint_to_pytorch`).
    """
    # Construct model: empty config file string means "use the default GPT-2 config"
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--gpt2_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--gpt2_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained OpenAI model. \n"
            "This specifies the model architecture."
        ),
    )
    args = parser.parse_args()
    # argparse stores `--gpt2_checkpoint_path` under `args.gpt2_checkpoint_path`;
    # the mangled code read nonexistent `gpta_*` attributes (AttributeError at runtime).
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 75 | 0 |
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module logger; the mangled name `SCREAMING_SNAKE_CASE` was never read while the
# class below calls `logger.warning(...)`.
logger = logging.get_logger(__name__)


class __UpperCAmelCase(SequenceFeatureExtractor):
    """Speech2Text feature extractor: Kaldi-style log-mel filter-bank features with
    optional utterance-level cepstral mean/variance normalization (CMVN).

    NOTE(review): restored from a mangled definition whose base class (`__a`) and
    body names (`_A`, `waveform`, `x`, ...) were undefined and whose dtypes were
    mangled (`np.floataa`/`np.intaa`).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform):
        """Compute Kaldi fbank features for a single mono numpy waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(x, input_length, normalize_means=True, normalize_vars=True, padding_value=0.0):
        """Mean/variance-normalize `x` over its first `input_length` frames and
        reset padded frames to `padding_value`."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        # real (unpadded) length of each utterance, from the mask when available
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_tensors=None,
        sampling_rate=None,
        return_attention_mask=None,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into padded `input_features`."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        # NOTE(review): `self.return_attention_mask` is True, so pad() produces a mask
        # even when `return_attention_mask` is None — confirm against upstream.
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 99 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    """Builds a tiny random LayoutLMv3 config plus matching inputs for the TF model tests.

    NOTE(review): the obfuscated original declared every parameter as ``_A`` (a
    SyntaxError) and named every helper ``lowercase_`` (so only the last binding
    survived). Names below are restored from the call sites visible in this file
    (``TFLayoutLMvaModelTester(self)``, ``prepare_config_and_inputs()``, ...).
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return a config plus random input tensors (ids, bbox, pixels, masks, labels)."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that every bbox is legal: x0 <= x1 and y0 <= y1.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        """Run the base model with text+image, text-only and image-only inputs."""
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            training=False,
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model({"pixel_values": pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check the sequence-classification head produces (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        """Check the token-classification head produces (batch, text_seq_length, num_labels) logits."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            training=False,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check the QA head produces start/end logits of shape (batch, seq_length)."""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            training=False,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model-level tests for TF LayoutLMv3.

    NOTE(review): the obfuscated original used an undefined base ``__a`` and the
    duplicate parameter name ``_A`` throughout (a SyntaxError); bases, method names
    and class attributes are restored from the imports at the top of the file and
    from the mixin contract (``all_model_classes``, ``pipeline_model_mapping``).
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Skip every pipeline test for this model for now.
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Adapt the common inputs_dict to ``model_class`` (tiling for multiple choice,
        adding the label tensors the head expects when ``return_labels`` is set)."""
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32
                )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        """The loss must be computable from kwargs, dict and tuple inputs alike."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, "hf_compute_loss", None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop("input_ids")
                if "labels" in prepared_for_class:
                    labels = prepared_for_class["labels"].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        # Mask the first example's labels entirely; loss must stay finite.
                        labels[0] = -100
                        prepared_for_class["labels"] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: "input_ids"}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)

                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(
                config, input_ids, bbox, pixel_values, token_type_ids, input_mask
            )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
        )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
        )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the slow integration test.

    NOTE(review): the obfuscated original was named ``a__`` and returned the
    undefined name ``image`` (NameError); the call site below uses ``prepare_img``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
class TFLayoutLMvaModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released microsoft/layoutlmv3-base checkpoint."""

    @cached_property
    def default_image_processor(self):
        # OCR is disabled because the test supplies its own tokens and boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained("microsoft/layoutlmv3-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="tf").pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the hidden states: 2 text tokens + 196 patches + 1 CLS = 199
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 75 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __lowerCamelCase(PretrainedConfig):
    """Configuration for the (deprecated) TrajectoryTransformer model.

    NOTE(review): the obfuscated original declared every ``__init__`` parameter as
    ``a__`` (a SyntaxError) and bound every attribute to a dead local instead of
    ``self``; parameter names are restored from the assignment order and defaults.
    """

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 211 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # Build the query from CLI args, or prompt interactively when none are given.
    # NOTE(review): the obfuscated original assigned query/url/res/link to a single
    # mangled name, so every later read (``url``, ``res.text``, ``link``) raised
    # NameError; the distinct names are restored here.
    query = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
    print("Googling.....")
    url = f"https://www.google.com/search?q={query}&num=100"
    res = requests.get(
        url,
        headers={"User-Agent": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "yuRUbf"})
            .find("a")
            .get("href")
        )
    except AttributeError:
        # Fallback result layout: the target href is wrapped in a redirect URL's query string.
        link = parse_qs(
            BeautifulSoup(res.text, "html.parser")
            .find("div", attrs={"class": "kCrYT"})
            .find("a")
            .get("href")
        )["url"][0]
    webbrowser.open(link)
| 75 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class _lowerCamelCase(PretrainedConfig):
    """Configuration for SwitchTransformers (sparse mixture-of-experts T5 variant).

    NOTE(review): the obfuscated original declared every ``__init__`` parameter as
    ``UpperCAmelCase`` (a SyntaxError) and bound attributes to a dead local instead
    of ``self`` — yet later read ``self.num_sparse_encoder_layers`` etc., which
    would raise AttributeError. Both are repaired here; parameter names are
    restored from the assignment order and defaults.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 243 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img, pta, ptb, rows, cols):
    """Warp ``img`` with the affine transform mapping the three points ``pta`` onto ``ptb``.

    NOTE(review): the obfuscated original named the function ``a__`` and gave all
    five parameters the same name (a SyntaxError); names are restored from the
    ``get_rotation(gray_img, pts.., pts.., img_rows, img_cols)`` call sites below.
    """
    rotation_matrix = cva.getAffineTransform(pta, ptb)
    return cva.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    # (np.floataa in the obfuscated source is a mangled np.float32)
    ptsa = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    ptsb = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    ptsc = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    ptsd = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # NOTE(review): the original point-pair groupings are unrecoverable from the
    # obfuscated source (all four arrays shared a single name); confirm these
    # pairings against the upstream script if exact output matters.
    images = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsb, img_rows, img_cols),
        get_rotation(gray_img, ptsb, ptsc, img_rows, img_cols),
        get_rotation(gray_img, ptsc, ptsd, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 75 | 0 |
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    """Return the length of a circular arc of ``angle`` degrees on a circle of ``radius``.

    NOTE(review): the obfuscated original named both parameters ``_lowerCamelCase``
    (a SyntaxError) while the body read ``radius``/``angle``, and the ``__main__``
    guard called the undefined name ``arc_length`` — all repaired here.
    """
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    """Close or nag stale issues on huggingface/transformers.

    An issue is closed when the bot already left the last comment, it has been
    inactive for over 7 days and is at least 30 days old; otherwise a stale
    warning comment is posted after 23 days of inactivity. Issues carrying any
    label in LABELS_TO_EXEMPT are never touched.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first. NOTE(review): the obfuscated original's sort key
        # was `lambda lowerCAmelCase__: i.created_at` (ignores its argument and
        # references an undefined `i`) with `reverse=lowerCAmelCase__` (undefined);
        # both are repaired here.
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
| 75 | 0 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible Markdown table.

    The input maps benchmark names to {metric: {"new": x, "old": y, "diff": z}}
    dicts ("old"/"diff" optional); one table per benchmark is written to
    ``output_md_file``. NOTE(review): the obfuscated original named both
    parameters ``a_`` (a SyntaxError) and read undefined names inside the body;
    names are restored from the ``__main__`` call site.
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_(DiffusionPipeline):
    """Unconditional audio-generation pipeline (DanceDiffusion-style denoising loop).

    NOTE(review): the obfuscated original used the duplicate parameter name ``_A``
    in both ``__init__`` and ``__call__`` (a SyntaxError) and the undefined base
    ``__a``; names are restored from the imports and the statement bodies.
    """

    def __init__(self, unet, scheduler):
        """Store the denoising U-Net and the noise scheduler as pipeline modules."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """Generate ``batch_size`` audio clips of roughly ``audio_length_in_s`` seconds.

        Raises:
            ValueError: if the requested length is too short for the U-Net's
                downsampling stack, or if a list of generators does not match
                ``batch_size``.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple of the U-Net's total downscale factor;
            # the surplus samples are trimmed again after denoising.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
| 75 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Public import structure consumed by `_LazyModule`: submodule -> exported names.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    # The modeling code requires torch; only register it when torch is installed.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazy proxy that imports
    # submodules only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 289 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the probability of exactly ``successes`` successes in ``trials``
    independent Bernoulli trials with per-trial success probability ``prob``.

    Raises:
        ValueError: if the counts are not non-negative integers, if
            ``successes > trials``, or if ``prob`` is outside (0, 1).
    """
    # Validate types first so the numeric comparisons below are meaningful.
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient: n! / (k! * (n - k)!)
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


# Backward-compatible alias for the original (obfuscated) name.
a__ = binomial_distribution

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 75 | 0 |
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or marked stale.
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main() -> None:
    """Triage open issues on huggingface/transformers.

    Closes issues that stayed inactive for a week after the stale-bot warning,
    and posts the stale warning on issues inactive for more than 23 days
    (and at least 30 days old). Exempt labels are skipped in both cases.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(issue.get_comments(), key=lambda comment: comment.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # The bot already warned and nobody replied for a week: close it.
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 2_3
            and (dt.utcnow() - issue.created_at).days >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                'This issue has been automatically marked as stale because it has not had '
                'recent activity. If you think this still needs to be addressed '
                'please comment on this thread.\n\nPlease note that issues that do not follow the '
                '[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
                'are likely to be ignored.')


# Backward-compatible alias for the original (obfuscated) name.
A = main

if __name__ == "__main__":
    main()
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_(BaseImageProcessor):
    r"""
    Image processor producing model-ready ``pixel_values``: optional
    shortest-edge resize (with a ``256 / 224`` scaling of the requested edge —
    see ``resize``), center crop, rescale and normalization.

    Every constructor argument is a default that an individual ``preprocess``
    call may override.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN,
        image_std: Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD,
        **kwargs,
    ):
        """Store the default preprocessing configuration."""
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize an image.

        A ``{"shortest_edge": s}`` size resizes the short side to
        ``int((256 / 224) * s)`` keeping the aspect ratio; a
        ``{"height": h, "width": w}`` size resizes to exactly (h, w).
        """
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop an image to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize an image with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, Iterable[float]]] = None,
        image_std: Optional[Union[float, Iterable[float]]] = None,
        return_tensors: Optional[TensorType] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms to one image or a batch of images
        and return a ``BatchFeature`` with ``pixel_values``.

        Any argument left as ``None`` falls back to the instance default.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image, size, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 75 | 0 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
UpperCamelCase = logging.get_logger(__name__)
class __lowerCamelCase(FlavaImageProcessor):
    """Deprecated alias of `FlavaImageProcessor`, kept for backward compatibility.

    Emits a ``FutureWarning`` on construction; scheduled for removal in
    Transformers v5.
    """

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 61 |
'''simple docstring'''
import math
def main() -> None:
    """Interactively encrypt or decrypt a message with a columnar
    transposition cipher."""
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")

    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)

    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"""Output:\n{text + "|"}""")


def encrypt_message(key: int, message: str) -> str:
    """Encrypt ``message`` by reading it out in ``key`` columns."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        # Collect every key-th character starting at this column.
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)


def decrypt_message(key: int, message: str) -> str:
    """Invert ``encrypt_message`` for the same ``key``."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    # Grid cells that were never filled during encryption.
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row at the last column, or one column early on
        # rows that end in a shaded (unused) cell.
        if (col == num_cols) or (col == num_cols - 1 and row >= num_rows - num_shaded_boxes):
            col = 0
            row += 1
    return "".join(plain_text)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 75 | 0 |
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve y' = ode_func(x, y), y(x0) = y0 on [x0, x_end] with the explicit
    (forward) Euler method.

    Returns:
        np.ndarray of the n + 1 solution values y(x0), y(x0 + h), ... where
        n = ceil((x_end - x0) / step_size).
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Forward step: y_{k+1} = y_k + h * f(x_k, y_k)
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size

    return y


# Backward-compatible alias for the original (obfuscated) name.
__lowerCamelCase = explicit_euler

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 421 |
'''simple docstring'''
class Node:
    """A named value used as a min-heap element; ordered by ``val``."""

    def __init__(self, name, val):
        self.name = name  # identifier, used as the heap_dict key
        self.val = val  # priority; smaller sorts first

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        return self.val < other.val


# Backward-compatible alias for the original (obfuscated) name.
lowerCamelCase_ = Node
class MinHeap:
    """Array-backed min-heap over Node-like elements (``name``/``val``
    attributes, ordered via ``__lt__``), with an element -> index map so
    ``decrease_key`` runs in O(log n).
    """

    def __init__(self, array):
        # element -> its current index in self.heap (kept in sync by the sifts)
        self.idx_of_element = {}
        # element name -> its current value (read by __getitem__)
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify ``array`` in place (Floyd's bottom-up method), fill the
        index and value maps, and return the array."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, item in enumerate(array):
            self.idx_of_element[item] = idx
            self.heap_dict[item.name] = item.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until it is <= both of its children."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                # Exchange the two elements' recorded indices as well.
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move self.heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum element."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum element."""
        # Swap root with the last element, pop it, then restore the heap.
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Add a new element and restore the heap property."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower ``node``'s value to ``new_value`` and re-heapify upward."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# Backward-compatible alias for the original (obfuscated) name.
lowerCamelCase_ = MinHeap
# Demo elements: Node(name, value); ordering is by value via Node.__lt__.
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -1_7)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 | 0 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
# Parsed nltk version; newer nltk requires pre-tokenized input (see _compute).
NLTK_VERSION = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
    from nltk import word_tokenize

# BibTeX citation for the METEOR paper.
_CITATION = '\\n@inproceedings{banarjee2005,\n  title     = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n  author    = {Banerjee, Satanjeev  and Lavie, Alon},\n  booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n  month     = jun,\n  year      = {2005},\n  address   = {Ann Arbor, Michigan},\n  publisher = {Association for Computational Linguistics},\n  url       = {https://www.aclweb.org/anthology/W05-0909},\n  pages     = {65--72},\n}\n'

# Human-readable description of the metric.
_DESCRIPTION = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'

# Usage/arguments docstring surfaced through `add_start_docstrings`.
_KWARGS_DESCRIPTION = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n    beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n    gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n    \'meteor\': meteor score.\nExamples:\n\n    >>> meteor = datasets.load_metric(\'meteor\')\n    >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n    >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n    >>> results = meteor.compute(predictions=predictions, references=references)\n    >>> print(round(results["meteor"], 4))\n    0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __lowerCAmelCase(datasets.Metric):
    """METEOR machine-translation metric, wrapping nltk's meteor_score."""

    def _info(self):
        """Describe the metric: features, citation, and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        """Fetch the nltk resources the scorer needs at runtime."""
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        """Return {"meteor": mean score} over the prediction/reference pairs.

        nltk >= 3.6.5 requires pre-tokenized input, hence the version split.
        """
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 665 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(R"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes whose docstrings legitimately lack a checkpoint reference.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose markdown link in the docstring of
    ``config_class`` points at the matching huggingface.co page, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-deprecated config class whose
    docstring lacks a valid checkpoint link (exempt classes aside)."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 75 | 0 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase_(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for `ConsistencyModelPipeline` using tiny dummy UNets."""

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet checkpoint used as the test backbone."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet checkpoint."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Build the pipeline components dict (unet + multistep CM scheduler)."""
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs; 'mps' needs a CPU-seeded generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    # Slow GPU integration tests for ConsistencyModelPipeline against the
    # released `diffusers/consistency_models` (cd_imagenet64_l2) checkpoint.
    # NOTE(review): machine-obfuscated — the `_A` placeholder is passed where
    # distinct arguments are expected and several names (`pipe`, `image`,
    # `inputs`, `latents`, `generator`) are read without being bound; comments
    # below describe the apparent upstream intent, to be confirmed.

    def _snake_case ( self ) -> Tuple:
        # Release CUDA memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self , __A=0 , __A=False , __A="cpu" , __A=torch.floataa , __A=(1, 3, 64, 64) ) -> List[Any]:
        # Build the shared pipeline kwargs: explicit two-value timestep
        # schedule, class label 0, numpy output, seeded generator.  When
        # `get_fixed_latents` is requested, pin deterministic latents too.
        SCREAMING_SNAKE_CASE_ : Tuple =torch.manual_seed(_A )
        SCREAMING_SNAKE_CASE_ : int ={
            '''num_inference_steps''': None,
            '''timesteps''': [22, 0],
            '''class_labels''': 0,
            '''generator''': generator,
            '''output_type''': '''np''',
        }
        if get_fixed_latents:
            SCREAMING_SNAKE_CASE_ : Any =self.get_fixed_latents(seed=_A , device=_A , dtype=_A , shape=_A )
            SCREAMING_SNAKE_CASE_ : Optional[Any] =latents
        return inputs

    def _snake_case ( self , __A=0 , __A="cpu" , __A=torch.floataa , __A=(1, 3, 64, 64) ) -> Tuple:
        # Deterministic latents for a given seed/device/dtype/shape; accepts a
        # device given either as a string or a torch.device.
        if type(_A ) == str:
            SCREAMING_SNAKE_CASE_ : int =torch.device(_A )
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =torch.Generator(device=_A ).manual_seed(_A )
        SCREAMING_SNAKE_CASE_ : List[str] =randn_tensor(_A , generator=_A , device=_A , dtype=_A )
        return latents

    def _snake_case ( self ) -> Tuple:
        # Multi-step sampling; checks a recorded 3x3 output slice.
        SCREAMING_SNAKE_CASE_ : Any =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        SCREAMING_SNAKE_CASE_ : Dict =CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        SCREAMING_SNAKE_CASE_ : Any =ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ : str =self.get_inputs()
        SCREAMING_SNAKE_CASE_ : List[Any] =pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        SCREAMING_SNAKE_CASE_ : Optional[int] =image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE_ : Optional[int] =np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def _snake_case ( self ) -> int:
        # Same checkpoint, forced single-step sampling (num_inference_steps=1,
        # timesteps=None).
        SCREAMING_SNAKE_CASE_ : Optional[Any] =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        SCREAMING_SNAKE_CASE_ : Tuple =CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        SCREAMING_SNAKE_CASE_ : Any =ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ : str =self.get_inputs()
        SCREAMING_SNAKE_CASE_ : Optional[Any] =1
        SCREAMING_SNAKE_CASE_ : Tuple =None
        SCREAMING_SNAKE_CASE_ : str =pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE_ : Union[str, Any] =np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def _snake_case ( self ) -> Dict:
        # fp16 multi-step run with fixed latents; the sdp_kernel context is
        # presumably used to pin flash attention on torch 2.0 — confirm flags.
        SCREAMING_SNAKE_CASE_ : Dict =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        SCREAMING_SNAKE_CASE_ : Optional[int] =CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        SCREAMING_SNAKE_CASE_ : List[Any] =ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ : Any =self.get_inputs(get_fixed_latents=_A , device=_A )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ):
            SCREAMING_SNAKE_CASE_ : Tuple =pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        SCREAMING_SNAKE_CASE_ : Optional[Any] =image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE_ : str =np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def _snake_case ( self ) -> List[Any]:
        # fp16 single-step variant of the test above.
        SCREAMING_SNAKE_CASE_ : str =UNetaDModel.from_pretrained('''diffusers/consistency_models''' , subfolder='''diffusers_cd_imagenet64_l2''' )
        SCREAMING_SNAKE_CASE_ : Tuple =CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        SCREAMING_SNAKE_CASE_ : int =ConsistencyModelPipeline(unet=_A , scheduler=_A )
        pipe.to(torch_device=_A , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=_A )
        SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_inputs(get_fixed_latents=_A , device=_A )
        SCREAMING_SNAKE_CASE_ : Dict =1
        SCREAMING_SNAKE_CASE_ : str =None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=_A , enable_math=_A , enable_mem_efficient=_A ):
            SCREAMING_SNAKE_CASE_ : Dict =pipe(**_A ).images
        assert image.shape == (1, 64, 64, 3)
        SCREAMING_SNAKE_CASE_ : Optional[int] =image[0, -3:, -3:, -1]
        SCREAMING_SNAKE_CASE_ : Dict =np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 443 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=__a ):
    # Placeholder object emitted when `torch`/`torchsde` are not installed:
    # every entry point raises an informative ImportError via
    # `requires_backends` instead of failing with an obscure AttributeError.
    # NOTE(review): the metaclass name is obfuscated — presumably DummyObject.
    lowerCAmelCase__ = ['torch', 'torchsde']  # backends this stub stands in for
    def __init__( self : Tuple , *_A : Any , **_A : Optional[Any] ):
        '''Refuse construction: raise the missing-backend error.'''
        requires_backends(self , ['''torch''', '''torchsde'''] )
    @classmethod
    def lowercase_ ( cls : List[Any] , *_A : Tuple , **_A : Tuple ):
        '''Classmethod stub; raises the missing-backend error when called.'''
        requires_backends(cls , ['''torch''', '''torchsde'''] )
    @classmethod
    def lowercase_ ( cls : List[str] , *_A : Optional[int] , **_A : Any ):
        '''Second classmethod stub (shadows the one above due to the obfuscated
        name collision — upstream these are `from_config`/`from_pretrained`;
        verify).'''
        requires_backends(cls , ['''torch''', '''torchsde'''] )
| 75 | 0 |
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __UpperCAmelCase :
    """Helper that builds a tiny YOLOS config/inputs and checks model outputs.

    NOTE(review): machine-obfuscated — parameters are declared ``__A`` while
    bodies read ``_A`` and descriptive names, and every assignment targets
    ``__a``, so the code as written is not runnable.  Comments describe the
    apparent upstream intent (transformers' YolosModelTester).
    """

    def __init__( self , __A , __A=13 , __A=[30, 30] , __A=2 , __A=3 , __A=True , __A=True , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=10 , __A=0.02 , __A=3 , __A=None , __A=8 , __A=10 , ):
        # Store the hyper-parameters of a deliberately tiny YOLOS model.
        __a = parent
        __a = batch_size
        __a = image_size
        __a = patch_size
        __a = num_channels
        __a = is_training
        __a = use_labels
        __a = hidden_size
        __a = num_hidden_layers
        __a = num_attention_heads
        __a = intermediate_size
        __a = hidden_act
        __a = hidden_dropout_prob
        __a = attention_probs_dropout_prob
        __a = type_sequence_label_size
        __a = initializer_range
        __a = num_labels
        __a = scope
        __a = n_targets
        __a = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        __a = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        __a = num_patches + 1 + self.num_detection_tokens

    def snake_case_ ( self ):
        # Build random pixel values plus (optionally) per-example detection
        # labels: random class ids and random normalized boxes per target.
        __a = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
        __a = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            __a = []
            for i in range(self.batch_size ):
                __a = {}
                __a = torch.randint(
                    high=self.num_labels , size=(self.n_targets,) , device=_A )
                __a = torch.rand(self.n_targets , 4 , device=_A )
                labels.append(_A )
        __a = self.get_config()
        return config, pixel_values, labels

    def snake_case_ ( self ):
        # Assemble a YolosConfig from the stored hyper-parameters.
        return YolosConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )

    def snake_case_ ( self , __A , __A , __A ):
        # Base model: last_hidden_state must be (batch, expected_seq_len, hidden).
        __a = YolosModel(config=_A )
        model.to(_A )
        model.eval()
        __a = model(_A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )

    def snake_case_ ( self , __A , __A , __A ):
        # Detection head: logits are (batch, det_tokens, labels+1) — the +1 is
        # the "no object" class — and pred_boxes are (batch, det_tokens, 4);
        # with labels a scalar loss is also produced.
        __a = YolosForObjectDetection(_A )
        model.to(_A )
        model.eval()
        __a = model(pixel_values=_A )
        __a = model(_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        __a = model(pixel_values=_A , labels=_A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )

    def snake_case_ ( self ):
        # Convenience wrapper returning (config, {"pixel_values": ...}).
        __a = self.prepare_config_and_inputs()
        __a = config_and_inputs
        __a = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __a , __a , unittest.TestCase ):
    """Model-level test suite for YOLOS (base model + object-detection head).

    NOTE(review): machine-obfuscated like the tester above (``__a`` targets,
    ``_A`` placeholders); the base-class names are obfuscated too — presumably
    ModelTesterMixin and PipelineTesterMixin.  Comments describe intent.
    """

    _lowerCamelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    # Pipeline mapping for the pipeline tester mixin.
    _lowerCamelCase = (
        {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {}
    )
    # Feature flags disabled for this architecture (obfuscation collapsed the
    # four distinct flag names into one — verify against upstream).
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False

    def snake_case_ ( self , __A , __A , __A=False ):
        # Inject dummy detection labels when a test asks for labelled inputs.
        __a = super()._prepare_for_class(_A , _A , return_labels=_A )
        if return_labels:
            if model_class.__name__ == "YolosForObjectDetection":
                __a = []
                for i in range(self.model_tester.batch_size ):
                    __a = {}
                    __a = torch.ones(
                        size=(self.model_tester.n_targets,) , device=_A , dtype=torch.long )
                    __a = torch.ones(
                        self.model_tester.n_targets , 4 , device=_A , dtype=torch.float )
                    labels.append(_A )
                __a = labels
        return inputs_dict

    def snake_case_ ( self ):
        # Create the model tester and a ConfigTester (YOLOS has no text modality).
        __a = YolosModelTester(self )
        __a = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )

    def snake_case_ ( self ):
        self.config_tester.run_common_tests()

    def snake_case_ ( self ):
        # Intentionally skipped common test (upstream: inputs_embeds unsupported).
        pass

    def snake_case_ ( self ):
        # Input embeddings must be an nn.Module; output embeddings absent or Linear.
        __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a = model_class(_A )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __a = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_A , nn.Linear ) )

    def snake_case_ ( self ):
        # forward() signature must start with `pixel_values`.
        __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a = model_class(_A )
            __a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a = [*signature.parameters.keys()]
            __a = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , _A )

    def snake_case_ ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_A )

    def snake_case_ ( self ):
        # Attention outputs: one tensor per layer with shape
        # (heads, seq_len, seq_len), where seq_len is YOLOS-specific.
        __a = self.model_tester.prepare_config_and_inputs_for_common()
        __a = True
        # in YOLOS, the seq_len is different
        __a = self.model_tester.expected_seq_len
        for model_class in self.all_model_classes:
            __a = True
            __a = False
            __a = True
            __a = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                __a = model(**self._prepare_for_class(_A , _A ) )
            __a = outputs.attentions
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __a = True
            __a = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                __a = model(**self._prepare_for_class(_A , _A ) )
            __a = outputs.attentions
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
            __a = len(_A )
            # Check attention is always last and order is fine
            __a = True
            __a = True
            __a = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                __a = model(**self._prepare_for_class(_A , _A ) )
            __a = 1
            self.assertEqual(out_len + added_hidden_states , len(_A ) )
            __a = outputs.attentions
            self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )

    def snake_case_ ( self ):
        # Hidden states: num_layers+1 tensors of (seq_length, hidden_size).
        def check_hidden_states_output(__A , __A , __A ):
            __a = model_class(_A )
            model.to(_A )
            model.eval()
            with torch.no_grad():
                __a = model(**self._prepare_for_class(_A , _A ) )
            __a = outputs.hidden_states
            __a = getattr(
                self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(_A ) , _A )
            # YOLOS has a different seq_length
            __a = self.model_tester.expected_seq_len
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a = True
            check_hidden_states_output(_A , _A , _A )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a = True
            check_hidden_states_output(_A , _A , _A )

    def snake_case_ ( self ):
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_object_detection(*_A )

    @slow
    def snake_case_ ( self ):
        # Loading the first published checkpoint must succeed.
        for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a = YolosModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
def a ():
    """Load and return the standard COCO cats fixture image used by the
    integration tests below.

    Fix: the opened image used to be bound to a throwaway obfuscated name
    while ``return`` referenced the undefined name ``image``, so calling this
    helper raised ``NameError`` instead of returning the picture.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
    """Slow integration test: run hustvl/yolos-small on the COCO cats image
    and compare logits, boxes and post-processed detections to recorded
    values.  NOTE(review): machine-obfuscated (``__a`` / ``_A``); comments
    describe the apparent upstream intent.
    """

    @cached_property
    def snake_case_ ( self ):
        # Image processor for the checkpoint, or None without vision deps.
        return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None

    @slow
    def snake_case_ ( self ):
        __a = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(_A )
        __a = self.default_image_processor
        __a = prepare_img()
        __a = image_processor(images=_A , return_tensors="""pt""" ).to(_A )
        # forward pass
        with torch.no_grad():
            __a = model(inputs.pixel_values )
        # verify outputs
        __a = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , _A )
        # Recorded 3x3 corners of the logits and predicted boxes.
        __a = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=_A , )
        __a = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=_A )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _A , atol=1E-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _A , atol=1E-4 ) )
        # verify postprocessing
        __a = image_processor.post_process_object_detection(
            _A , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        # Expected detections: scores, COCO label ids and the first box.
        __a = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(_A )
        __a = [75, 75, 17, 63, 17]
        __a = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(_A )
        self.assertEqual(len(results["""scores"""] ) , 5 )
        self.assertTrue(torch.allclose(results["""scores"""] , _A , atol=1E-4 ) )
        self.assertSequenceEqual(results["""labels"""].tolist() , _A )
        self.assertTrue(torch.allclose(results["""boxes"""][0, :] , _A ) )
| 99 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_ ( __a ):
    """Configuration for the CTRL model (obfuscated: the base class is
    presumably transformers' PretrainedConfig — verify).

    Stores vocabulary size, context length, embedding/FFN widths, layer and
    head counts, dropout rates and initializer settings.
    """

    lowerCAmelCase__ = 'ctrl'  # model_type identifier
    lowerCAmelCase__ = ['past_key_values']  # keys ignored at inference
    # Map common config attribute names onto CTRL's historical short names.
    lowerCAmelCase__ = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self : List[Any] , _A : Dict=246_534 , _A : Optional[Any]=256 , _A : Dict=1_280 , _A : List[str]=8_192 , _A : Tuple=48 , _A : Optional[Any]=16 , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[str]=1e-6 , _A : Optional[int]=0.0_2 , _A : Tuple=True , **_A : Optional[Any] , ):
        '''Store the hyper-parameters and forward the rest to the base config.

        NOTE(review): obfuscation collapsed all parameter names into ``_A``;
        the body reads vocab_size, n_positions, n_embd, n_layer, n_head, dff,
        resid_pdrop, embd_pdrop, layer_norm_epsilon, initializer_range and
        use_cache — the defaults above appear to map to those in that order.
        '''
        UpperCAmelCase__ : Union[str, Any] = vocab_size
        UpperCAmelCase__ : Any = n_positions
        UpperCAmelCase__ : Optional[Any] = n_embd
        UpperCAmelCase__ : List[str] = n_layer
        UpperCAmelCase__ : Any = n_head
        UpperCAmelCase__ : int = dff
        UpperCAmelCase__ : str = resid_pdrop
        UpperCAmelCase__ : Tuple = embd_pdrop
        UpperCAmelCase__ : int = layer_norm_epsilon
        UpperCAmelCase__ : Tuple = initializer_range
        UpperCAmelCase__ : Union[str, Any] = use_cache
        super().__init__(**_A )
| 75 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowerCamelCase ( __a , __a , __a , unittest.TestCase ):
    """Fast (CPU, dummy-weights) tests for StableDiffusionInpaintPipeline.

    NOTE(review): machine-obfuscated — ``_A`` placeholders stand in for real
    arguments and the mixin base-class names are hidden; comments describe the
    apparent upstream intent.
    """

    snake_case__ : List[Any] = StableDiffusionInpaintPipeline
    snake_case__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    snake_case__ : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    snake_case__ : Optional[Any] = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    snake_case__ : Any = frozenset([] )

    def a_ ( self ):
        # Tiny randomly-initialised UNet (9 in-channels: latents + mask +
        # masked image), PNDM scheduler, small VAE and CLIP text encoder.
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
        __SCREAMING_SNAKE_CASE : int = PNDMScheduler(skip_prk_steps=_A )
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(_A )
        __SCREAMING_SNAKE_CASE : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        __SCREAMING_SNAKE_CASE : str = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def a_ ( self , a__ , a__=0 ):
        # Random 64x64 init image + a shifted copy as mask, seeded generator,
        # and the standard prompt/kwargs for a 2-step run.
        __SCREAMING_SNAKE_CASE : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        __SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert("RGB" ).resize((64, 64) )
        __SCREAMING_SNAKE_CASE : int = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
        if str(_A ).startswith("mps" ):
            # mps does not support device-bound generators.
            __SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(_A )
        else:
            __SCREAMING_SNAKE_CASE : str = torch.Generator(device=_A ).manual_seed(_A )
        __SCREAMING_SNAKE_CASE : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def a_ ( self ):
        # Two-step CPU run; checks a recorded 3x3 output slice.
        __SCREAMING_SNAKE_CASE : Dict = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE : str = StableDiffusionInpaintPipeline(**_A )
        __SCREAMING_SNAKE_CASE : List[str] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        __SCREAMING_SNAKE_CASE : Dict = self.get_dummy_inputs(_A )
        __SCREAMING_SNAKE_CASE : Any = sd_pipe(**_A ).images
        __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        __SCREAMING_SNAKE_CASE : int = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def a_ ( self ):
        # Batched vs. single inference must agree within 3e-3.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
    """Slow GPU tests for stabilityai/stable-diffusion-2-inpainting: fp32,
    fp16 and sequential-CPU-offload memory-budget runs against reference
    images fetched from the hub.  NOTE(review): machine-obfuscated (``_A``
    placeholders); comments describe the apparent upstream intent.
    """

    def a_ ( self ):
        # Release CUDA memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def a_ ( self ):
        # fp32 end-to-end inpainting; output must match the stored .npy
        # reference within 9e-3 per pixel.
        __SCREAMING_SNAKE_CASE : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __SCREAMING_SNAKE_CASE : Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __SCREAMING_SNAKE_CASE : List[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy" )
        __SCREAMING_SNAKE_CASE : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
        __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        __SCREAMING_SNAKE_CASE : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        __SCREAMING_SNAKE_CASE : str = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : str = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type="np" , )
        __SCREAMING_SNAKE_CASE : int = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def a_ ( self ):
        # fp16 variant; looser 5e-1 tolerance against the fp16 reference.
        __SCREAMING_SNAKE_CASE : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __SCREAMING_SNAKE_CASE : Any = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
        __SCREAMING_SNAKE_CASE : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
        __SCREAMING_SNAKE_CASE : Any = StableDiffusionInpaintPipeline.from_pretrained(
            _A , torch_dtype=torch.floataa , safety_checker=_A , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        __SCREAMING_SNAKE_CASE : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Optional[Any] = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type="np" , )
        __SCREAMING_SNAKE_CASE : Tuple = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def a_ ( self ):
        # With attention slicing + sequential CPU offload, peak GPU memory
        # must stay under 2.65 GB.
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        __SCREAMING_SNAKE_CASE : Union[str, Any] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __SCREAMING_SNAKE_CASE : Dict = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
        __SCREAMING_SNAKE_CASE : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
        __SCREAMING_SNAKE_CASE : str = PNDMScheduler.from_pretrained(_A , subfolder="scheduler" )
        __SCREAMING_SNAKE_CASE : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        __SCREAMING_SNAKE_CASE : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        __SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 )
        __SCREAMING_SNAKE_CASE : Any = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type="np" , )
        __SCREAMING_SNAKE_CASE : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 211 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'pytorch',
            'script': 'run_ddp.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf_dist.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
        },
    ] )
class lowerCamelCase_ ( unittest.TestCase ):
    """Release-time SageMaker multi-node training tests, parameterized over
    framework/script/instance combinations; only runs when TEST_SAGEMAKER=True.
    NOTE(review): machine-obfuscated (``_A`` placeholders) — comments describe
    the apparent upstream intent.
    """

    def lowercase_ ( self : List[str] ):
        # Copy the example training script into the test workspace (pytorch only)
        # and make sure the `sm_env` fixture populated self.env.
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
        assert hasattr(self , '''env''' )

    def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
        # Build a HuggingFace estimator; smdistributed data-parallel is enabled
        # for every script except run_ddp.py (which uses native DDP).
        UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )

    def lowercase_ ( self : Optional[int] , _A : Any ):
        # Export a finished job's metric stream to CSV for later inspection.
        TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(2,)] )
    def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
        # Launch a 2-instance training job, then assert runtime/accuracy/loss
        # against the parameterized thresholds and dump the KPIs to JSON.
        UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )
        # run training
        estimator.fit()
        # result dataframe
        UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase__ : Any = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 | 0 |
from multiprocessing import Lock, Pipe, Process

# Lock used to ensure that two processes do not access a pipe at the same time.
process_lock = Lock()
# Backward-compatible alias for the obfuscated module-level name.
_UpperCamelCase = process_lock


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe, n_rounds):
    """Worker for one element of a parallel odd-even transposition sort.

    Each worker owns a single ``value``.  For ``n_rounds`` rounds it pairs up
    with its left or right neighbour (alternating by round parity), exchanges
    values through the given pipes, and keeps the smaller value if it is the
    left partner of the pair (larger if it is the right one).  The settled
    value is finally sent back through ``result_pipe``.

    Fixes over the previous version: the lock was referenced through an
    undefined global name (``process_lock`` was bound to ``_UpperCamelCase``),
    and the number of rounds was hard-coded to 10, which only sorts lists of
    up to 10 elements; it is now passed in by the coordinator.
    """
    # We perform n rounds since after n rounds an n-element network is sorted;
    # we *could* stop early, but detecting "already sorted" costs as much as
    # simply finishing the schedule.
    for i in range(n_rounds):
        if (i + position) % 2 == 0 and r_send is not None:
            # Paired with the right neighbour: send our value, read theirs,
            # keep the smaller one since we are the left partner.
            with process_lock:
                r_send[1].send(value)
            with process_lock:
                value = min(value, rr_cv[0].recv())
        elif (i + position) % 2 != 0 and l_send is not None:
            # Paired with the left neighbour: send, receive, keep the larger.
            with process_lock:
                l_send[1].send(value)
            with process_lock:
                value = max(value, lr_cv[0].recv())
    # After all swaps are performed, report the value back to the parent.
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    """Sort ``arr`` in place with a parallel odd-even transposition network.

    One process is spawned per element; neighbouring processes exchange their
    values through pipes.  Returns ``arr`` (also mutated in place).

    Fixes over the previous version: the previous ``main`` called this name
    while the function itself carried an obfuscated colliding name, and lists
    with fewer than two elements deadlocked (the lone worker waited forever
    on a neighbour that does not exist).
    """
    # Nothing to exchange for empty or single-element input.
    if len(arr) < 2:
        return arr

    n_rounds = len(arr)
    process_array_ = []
    # One result pipe per element, used to retrieve the settled values.
    result_pipe = [Pipe() for _ in arr]

    # The first and last elements have only one neighbour, so their workers
    # are created outside of the loop.
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0], n_rounds),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i], n_rounds),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
                n_rounds,
            ),
        )
    )

    # Start the workers, then collect their values back into the list.
    for p in process_array_:
        p.start()
    for p in range(len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    """Demo: sort a reversed 10-element list and print it before and after."""
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
| 243 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
UpperCamelCase__ = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''
UpperCamelCase__ = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''
UpperCamelCase__ = R'''
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
>>> metric = datasets.load_metric("competition_math")
>>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
>>> print(results)
{\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
# NOTE(review): ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` / ``_CITATION`` are
# not defined in this module — the string constants above are all bound to
# ``UpperCamelCase__`` — so importing this file raises NameError. Confirm the
# intended constant names against the upstream ``competition_math`` metric.
class lowerCamelCase_ ( datasets.Metric ):
    def lowercase_ ( self : Dict ):
        """Describe the metric: string predictions vs. string references."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''' ),
                    '''references''': datasets.Value('''string''' ),
                } ) , homepage='''https://github.com/hendrycks/math''' , codebase_urls=['''https://github.com/hendrycks/math'''] , )
    # NOTE(review): both parameters are declared as ``_A`` — duplicate
    # parameter names are a SyntaxError in Python — and the body reads the
    # unbound names ``n_correct`` / ``accuracy`` (both assignments go to the
    # throwaway ``UpperCAmelCase__``). Upstream this computes the fraction of
    # predictions judged equivalent by ``math_equivalence.is_equiv``.
    def lowercase_ ( self : Any , _A : str , _A : Optional[Any] ):
        """Compute canonicalized accuracy over (prediction, reference) pairs."""
        UpperCAmelCase__ : Optional[Any] = 0.0
        for i, j in zip(_A , _A ):
            n_correct += 1.0 if math_equivalence.is_equiv(_A , _A ) else 0.0
        UpperCAmelCase__ : Dict = n_correct / len(_A )
        return {
            "accuracy": accuracy,
        }
| 75 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__A = {'''configuration_reformer''': ['''REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ReformerConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''ReformerTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''ReformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ReformerAttention''',
'''ReformerForMaskedLM''',
'''ReformerForQuestionAnswering''',
'''ReformerForSequenceClassification''',
'''ReformerLayer''',
'''ReformerModel''',
'''ReformerModelWithLMHead''',
'''ReformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 646 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
UpperCamelCase__ = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
UpperCamelCase__ = {
'''facebook/bart-base''': 1_0_2_4,
'''facebook/bart-large''': 1_0_2_4,
'''facebook/bart-large-mnli''': 1_0_2_4,
'''facebook/bart-large-cnn''': 1_0_2_4,
'''facebook/bart-large-xsum''': 1_0_2_4,
'''yjernite/bart_eli5''': 1_0_2_4,
}
class lowerCamelCase_ ( __a ):
    """Fast (tokenizers-backed) BART tokenizer wrapper.

    NOTE(review): the base class ``__a`` is not defined in this module
    (upstream this derives from PreTrainedTokenizerFast, imported above),
    and the five class attributes below are all bound to the same name
    ``lowerCAmelCase__`` so only the last assignment survives. Verify
    against the upstream BartTokenizerFast before relying on this class.
    """
    # Upstream these are: vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes, model_input_names, slow_tokenizer_class.
    lowerCAmelCase__ = VOCAB_FILES_NAMES
    lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
    lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    lowerCAmelCase__ = ['input_ids', 'attention_mask']
    lowerCAmelCase__ = BartTokenizer
    # NOTE(review): every parameter below is declared as ``_A`` — duplicate
    # parameter names are a SyntaxError. Upstream these are vocab_file,
    # merges_file, tokenizer_file, errors, the special tokens, and the
    # add_prefix_space / trim_offsets flags.
    def __init__( self : Tuple , _A : List[str]=None , _A : Optional[Any]=None , _A : Union[str, Any]=None , _A : Tuple="replace" , _A : Optional[Any]="<s>" , _A : int="</s>" , _A : Optional[Any]="</s>" , _A : List[str]="<s>" , _A : Optional[int]="<unk>" , _A : Optional[int]="<pad>" , _A : str="<mask>" , _A : Dict=False , _A : int=True , **_A : Optional[Any] , ):
        """Build the fast tokenizer and sync pre-tokenizer / post-processor state."""
        super().__init__(
            _A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
        # NOTE(review): the result below goes to the throwaway name
        # ``UpperCAmelCase__``; the following lines read the never-bound names
        # ``pre_tok_state`` / ``add_prefix_space`` / ``pre_tok_class``.
        UpperCAmelCase__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , _A ) != add_prefix_space:
            UpperCAmelCase__ : str = getattr(_A , pre_tok_state.pop('''type''' ) )
            UpperCAmelCase__ : Any = add_prefix_space
            UpperCAmelCase__ : str = pre_tok_class(**_A )
        UpperCAmelCase__ : Dict = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        UpperCAmelCase__ : Optional[Any] = '''post_processor'''
        UpperCAmelCase__ : List[Any] = getattr(self.backend_tokenizer , _A , _A )
        if tokenizer_component_instance:
            UpperCAmelCase__ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                UpperCAmelCase__ : Union[str, Any] = tuple(state['''sep'''] )
            if "cls" in state:
                UpperCAmelCase__ : Union[str, Any] = tuple(state['''cls'''] )
            UpperCAmelCase__ : Dict = False
            if state.get('''add_prefix_space''' , _A ) != add_prefix_space:
                UpperCAmelCase__ : Union[str, Any] = add_prefix_space
                UpperCAmelCase__ : Dict = True
            if state.get('''trim_offsets''' , _A ) != trim_offsets:
                UpperCAmelCase__ : List[Any] = trim_offsets
                UpperCAmelCase__ : List[Any] = True
            if changes_to_apply:
                UpperCAmelCase__ : Dict = getattr(_A , state.pop('''type''' ) )
                UpperCAmelCase__ : Union[str, Any] = component_class(**_A )
                setattr(self.backend_tokenizer , _A , _A )
    @property
    def lowercase_ ( self : Dict ):
        """Return the mask token string, or None (logging an error) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    # NOTE(review): ``@mask_token.setter`` requires a property named
    # ``mask_token``; the getter above is named ``lowercase_``, so this
    # decorator would raise NameError at class-creation time.
    @mask_token.setter
    def lowercase_ ( self : Dict , _A : List[Any] ):
        """Wrap a raw string mask token in an AddedToken that strips left spaces."""
        UpperCAmelCase__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
        UpperCAmelCase__ : str = value
    def lowercase_ ( self : Optional[int] , *_A : List[str] , **_A : Dict ):
        """Batch-encode; rejects pretokenized input unless add_prefix_space=True."""
        UpperCAmelCase__ : Any = kwargs.get('''is_split_into_words''' , _A )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*_A , **_A )
    def lowercase_ ( self : Optional[Any] , *_A : Union[str, Any] , **_A : List[Any] ):
        """Encode a single example; same pretokenized-input guard as above."""
        UpperCAmelCase__ : Optional[Any] = kwargs.get('''is_split_into_words''' , _A )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*_A , **_A )
    def lowercase_ ( self : Optional[int] , _A : str , _A : Optional[str] = None ):
        """Save the tokenizer model files into a directory; returns the file paths."""
        UpperCAmelCase__ : str = self._tokenizer.model.save(_A , name=_A )
        return tuple(_A )
    def lowercase_ ( self : Tuple , _A : Union[str, Any] , _A : Optional[int]=None ):
        """Build BART inputs: ``<s> A </s>`` (``... </s> B </s>`` for pairs)."""
        UpperCAmelCase__ : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def lowercase_ ( self : int , _A : List[int] , _A : Optional[List[int]] = None ):
        """Return an all-zero token-type-id mask (BART does not use token types)."""
        UpperCAmelCase__ : Optional[int] = [self.sep_token_id]
        UpperCAmelCase__ : int = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 75 | 0 |
'''simple docstring'''
import functools
from typing import Any
def __UpperCAmelCase ( string: str, words: list[str] ) -> bool:
    """Return True if *string* can be segmented into a sequence of *words*.

    Uses a character trie over *words* plus a memoized DP over suffix start
    indices (word-break problem).

    Fix from review: the original signature declared the same parameter name
    ``a_`` twice (a SyntaxError) while the body already read the free names
    ``string`` / ``words`` and the throwaway alias ``lowerCAmelCase__``;
    restored real bindings without changing the algorithm or messages.

    Raises:
        ValueError: if *string* is not a non-empty str, or *words* is not a
            list of non-empty strings.
    """
    # Validation
    if not isinstance(string, str ) or len(string ) == 0:
        raise ValueError("the string should be not empty string" )
    if not isinstance(words, list ) or not all(
        isinstance(item, str ) and len(item ) > 0 for item in words ):
        raise ValueError("the words should be a list of non-empty strings" )

    # Build trie: nested dicts keyed by character; WORD_KEEPER marks a word end.
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string )

    # Dynamic programming method: is_breakable(i) <=> string[i:] is segmentable.
    @functools.cache
    def is_breakable(index: int ) -> bool:
        if index == len_string:
            return True
        trie_node = trie
        for i in range(index, len_string ):
            trie_node = trie_node.get(string[i], None )
            if trie_node is None:
                return False
            # A word ends at i and the rest of the string is breakable.
            if trie_node.get(word_keeper_key, False ) and is_breakable(i + 1 ):
                return True
        return False

    return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 494 |
'''simple docstring'''
import random
from typing import Any
def a__ ( lowerCAmelCase__ ) -> list[Any]:
    """Shuffle the sequence in place with len(data) random pair swaps and
    return it (Fisher-Yates-style demo shuffle).

    Fix from review: the original drew both indices into the same throwaway
    name and assigned the swap to two unused locals while reading the
    never-bound names ``data`` / ``a`` / ``b`` — it raised NameError and
    performed no swap.
    """
    for _ in range(len(lowerCAmelCase__ ) ):
        a = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
        b = random.randint(0 , len(lowerCAmelCase__ ) - 1 )
        lowerCAmelCase__[a], lowerCAmelCase__[b] = lowerCAmelCase__[b], lowerCAmelCase__[a]
    return lowerCAmelCase__
if __name__ == "__main__":
UpperCamelCase__ = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCamelCase__ = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 75 | 0 |
"""simple docstring"""
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowerCAmelCase : Optional[Any] = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_lowerCAmelCase : Optional[int] = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_lowerCAmelCase : Any = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, 
references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n"
def __snake_case ( preds , labels ) -> float:
    """Return simple accuracy: the mean of element-wise equality between the
    prediction and label arrays.

    Fix from review: the original declared ``SCREAMING_SNAKE_CASE__`` twice
    as the parameter name (duplicate parameters are a SyntaxError) while the
    body already read ``preds`` / ``labels``; also corrected the return
    annotation — the body returns a float, not an int.
    """
    return float((preds == labels).mean() )
# NOTE(review): all three parameters are declared as ``SCREAMING_SNAKE_CASE__``
# (duplicate parameter names are a SyntaxError), the body reads the unbound
# alias ``lowerCAmelCase__``, calls ``simple_accuracy`` / ``fa_score`` which
# are not defined under those names here, and returns the never-bound names
# ``acc`` / ``fa``. Upstream this returns {"accuracy": ..., "f1": ...} for a
# (preds, labels, f1_average) triple — confirm against the SuperGLUE metric.
def __snake_case ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str]="binary" ) -> dict:
    """Combine simple accuracy with an sklearn F1 score for one task."""
    _UpperCAmelCase : Optional[int] = simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )
    _UpperCAmelCase : List[str] = float(fa_score(y_true=lowerCAmelCase__ , y_pred=lowerCAmelCase__ , average=lowerCAmelCase__ ) )
    return {
        "accuracy": acc,
        "f1": fa,
    }
# NOTE(review): duplicate ``SCREAMING_SNAKE_CASE__`` parameters (SyntaxError);
# every assignment goes to the throwaway ``_UpperCAmelCase`` while the body
# reads the unbound names ``question_id`` / ``pred`` / ``label`` / ``fas`` /
# ``ems`` / ``ids_preds`` and the alias ``lowerCAmelCase__``. Upstream this
# groups MultiRC answer predictions per question and returns
# {"exact_match", "f1_m", "f1_a"} — verify before fixing.
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Optional[Any]:
    """Score MultiRC: per-question macro F1, answer-level F1, and exact match."""
    # Group (prediction, label) pairs by "paragraph-question" id.
    _UpperCAmelCase : Tuple = {}
    for id_pred, label in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
        _UpperCAmelCase : int = f'{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'
        _UpperCAmelCase : Dict = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            _UpperCAmelCase : List[Any] = [(pred, label)]
    # Per-question macro F1 and exact match, then averaged over questions.
    _UpperCAmelCase : Any = [], []
    for question, preds_labels in question_map.items():
        _UpperCAmelCase : Optional[int] = zip(*lowerCAmelCase__ )
        _UpperCAmelCase : Optional[int] = fa_score(y_true=lowerCAmelCase__ , y_pred=lowerCAmelCase__ , average="macro" )
        fas.append(lowerCAmelCase__ )
        _UpperCAmelCase : List[Any] = int(sum(pred == label for pred, label in preds_labels ) == len(lowerCAmelCase__ ) )
        ems.append(lowerCAmelCase__ )
    _UpperCAmelCase : List[str] = float(sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ ) )
    _UpperCAmelCase : Any = sum(lowerCAmelCase__ ) / len(lowerCAmelCase__ )
    # Answer-level F1 over all individual predictions.
    _UpperCAmelCase : Dict = float(fa_score(y_true=lowerCAmelCase__ , y_pred=[id_pred["prediction"] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
# NOTE(review): ``_DESCRIPTION`` / ``_KWARGS_DESCRIPTION`` are not defined in
# this module — the doc constants above are bound to ``_lowerCAmelCase`` — so
# this decorator raises NameError at import time.
class UpperCAmelCase_ ( datasets.Metric ):
    """SuperGLUE metric: dispatches on ``self.config_name`` to per-task scoring.

    NOTE(review): all three methods below share the name ``snake_case_`` so
    earlier definitions are shadowed; upstream these are ``_info``,
    ``_get_feature_types`` and ``_compute``.
    """
    def snake_case_ ( self : Tuple ):
        """Validate the configuration name and build the MetricInfo."""
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
        # NOTE(review): ``self._get_feature_types`` does not exist under that
        # name here (the schema method below is also called ``snake_case_``).
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None , )
    def snake_case_ ( self : List[str] ):
        """Return the per-config prediction/reference feature schema."""
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64" ),
                        "query": datasets.Value("int64" ),
                    },
                    "prediction_text": datasets.Value("string" ),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64" ),
                        "query": datasets.Value("int64" ),
                    },
                    "answers": datasets.Sequence(datasets.Value("string" ) ),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64" ),
                        "paragraph": datasets.Value("int64" ),
                        "question": datasets.Value("int64" ),
                    },
                    "prediction": datasets.Value("int64" ),
                },
                "references": datasets.Value("int64" ),
            }
        else:
            return {
                "predictions": datasets.Value("int64" ),
                "references": datasets.Value("int64" ),
            }
    # NOTE(review): both parameters are declared as ``A`` (duplicate parameter
    # names are a SyntaxError) and the "record" branch reads the unbound names
    # ``references`` / ``predictions``.
    def snake_case_ ( self : Optional[Any] , A : List[Any] , A : Optional[Any] ):
        """Dispatch to the task-specific scoring function for this config."""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(_A , _A )}
        elif self.config_name == "cb":
            return acc_and_fa(_A , _A , fa_avg="macro" )
        elif self.config_name == "record":
            _UpperCAmelCase : List[Any] = [
                {
                    '''qas''': [
                        {'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
                        for ref in references
                    ]
                }
            ]
            _UpperCAmelCase : str = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
            return evaluate_record(_A , _A )[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(_A , _A )
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(_A , _A )}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 289 |
'''simple docstring'''
import math
def a__ ( lowerCAmelCase__ ) -> list[int]:
    """Return all primes <= ``lowerCAmelCase__`` using a segmented sieve of
    Eratosthenes.

    Fix from review: every local in the original was bound to the same
    throwaway name ``UpperCAmelCase__`` while the loop bodies read the
    never-bound names ``start`` / ``end`` / ``temp`` / ``in_prime`` /
    ``prime`` / ``low`` / ``high`` / ``t`` / ``n``; the segmented-sieve
    logic is restored with real bindings.
    """
    prime = []
    start = 2
    end = int(math.sqrt(lowerCAmelCase__ ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Classic sieve over the base segment [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , lowerCAmelCase__ )
    # Sieve the rest of the range in segments of width `end`.
    while low <= lowerCAmelCase__:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside [low, high].
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , lowerCAmelCase__ )
    return prime
print(sieve(1_0**6))
| 75 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : List[Any] = logging.get_logger(__name__)
snake_case : Union[str, Any] = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCamelCase__ ( PretrainedConfig):
    """Configuration for a YOLOS object-detection model.

    Restored from review: the original derived from the undefined name
    ``__a``, declared every ``__init__`` parameter as ``UpperCamelCase_``
    (duplicate parameter names are a SyntaxError), bound every attribute to
    the throwaway ``__magic_name__`` while reading unbound names, and called
    ``super().__init__(**_A)`` with ``_A`` undefined. Parameter names and
    defaults follow the upstream ``YolosConfig`` (the defaults here match it
    exactly).
    """
    model_type = "yolos"

    def __init__(
        self ,
        hidden_size=7_6_8 ,
        num_hidden_layers=1_2 ,
        num_attention_heads=1_2 ,
        intermediate_size=3_0_7_2 ,
        hidden_act="gelu" ,
        hidden_dropout_prob=0.0 ,
        attention_probs_dropout_prob=0.0 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-1_2 ,
        image_size=[5_1_2, 8_6_4] ,
        patch_size=1_6 ,
        num_channels=3 ,
        qkv_bias=True ,
        num_detection_tokens=1_0_0 ,
        use_mid_position_embeddings=True ,
        auxiliary_loss=False ,
        class_cost=1 ,
        bbox_cost=5 ,
        giou_cost=2 ,
        bbox_loss_coefficient=5 ,
        giou_loss_coefficient=2 ,
        eos_coefficient=0.1 ,
        **kwargs ,
    ):
        """Store encoder, detection-head, matcher and loss hyper-parameters."""
        super().__init__(**kwargs )
        # Transformer encoder.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Patch embedding / input layout.
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class UpperCamelCase__ ( OnnxConfig):
    """ONNX export configuration for YOLOS.

    Restored from review: the original derived from the undefined name
    ``__a`` and defined all three properties under the single name ``a__``
    (each clobbering the previous). Upstream (``YolosOnnxConfig``) these are
    ``inputs``, ``atol_for_validation`` and ``default_onnx_opset``, and the
    class attribute is ``torch_onnx_minimum_version``.
    """
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self ):
        """Input name -> dynamic-axis labels used by the ONNX exporter."""
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self ):
        """Absolute tolerance when validating the exported model's outputs."""
        return 1e-4

    @property
    def default_onnx_opset(self ):
        """Minimum ONNX opset version the export targets."""
        return 1_2
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
# NOTE(review): the base class ``__a`` is undefined and repeated three times
# (upstream these are the PipelineLatentTester / KarrasSchedulerTester /
# PipelineTester mixins imported above); the five class attributes below all
# bind the same name ``lowerCAmelCase__``, and the method bodies assign to the
# throwaway ``UpperCAmelCase__`` while reading unbound names (``_A``,
# ``sd_pipe``, ``image``, ...). Kept byte-identical; confirm against the
# upstream diffusers test file before fixing.
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
    """Fast CPU tests for StableDiffusionInpaintPipeline with tiny components."""
    lowerCAmelCase__ = StableDiffusionInpaintPipeline
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowerCAmelCase__ = frozenset(
        [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowerCAmelCase__ = frozenset([] )
    def lowercase_ ( self : Optional[int] ):
        """Build tiny seeded UNet/scheduler/VAE/CLIP components for the pipeline."""
        torch.manual_seed(0 )
        UpperCAmelCase__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
        UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
        torch.manual_seed(0 )
        UpperCAmelCase__ : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
        UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        UpperCAmelCase__ : str = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components
    # NOTE(review): duplicate ``_A`` parameters — a SyntaxError.
    def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
        """Build a deterministic 64x64 init image, mask and prompt kwargs."""
        UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
        UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
        else:
            UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase__ : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs
    def lowercase_ ( self : Union[str, Any] ):
        """Run a 2-step CPU inpaint and compare a corner slice to a golden value."""
        UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase__ : Tuple = self.get_dummy_components()
        UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
        UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
        UpperCAmelCase__ : Any = sd_pipe(**_A ).images
        UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def lowercase_ ( self : Tuple ):
        """Check batched vs. single inference agree within a small tolerance."""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : str = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.floataa , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase_ ( self : Any ):
    """Slow test: sequential CPU offload keeps peak GPU memory under ~2.65 GB.

    NOTE(review): same machine-mangled identifiers as the rest of this file —
    ``UpperCAmelCase__`` locals are later read as ``pipe`` / ``mem_bytes`` and
    ``_A`` stands in for concrete values; confirm against the upstream
    diffusers test before executing.
    """
    # Reset CUDA accounting so max_memory_allocated reflects this test only.
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    UpperCAmelCase__ : Union[str, Any] = load_image(
        '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
        '''/sd2-inpaint/init_image.png''' )
    UpperCAmelCase__ : Dict = load_image(
        '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
    UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
    UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
    UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
        _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
    pipe.to(_A )
    pipe.set_progress_bar_config(disable=_A )
    # Most aggressive memory savings: slice attention one step at a time and
    # stream submodules between CPU and GPU on demand.
    pipe.enable_attention_slicing(1 )
    pipe.enable_sequential_cpu_offload()
    UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
    UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
    UpperCAmelCase__ : Any = pipe(
        prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
    UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
    # make sure that less than 2.65 GB is allocated
    assert mem_bytes < 2.6_5 * 10**9
| 75 | 0 |
from ...configuration_utils import PretrainedConfig
# Map from released TAPAS checkpoint name to its hosted config.json URL.
# Every entry follows the same hub layout, so the table is built from the
# checkpoint names instead of spelling each URL out by hand.
UpperCamelCase = {
    checkpoint: f'https://huggingface.co/{checkpoint}/resolve/main/config.json'
    for checkpoint in (
        'google/tapas-base-finetuned-sqa',
        'google/tapas-base-finetuned-wtq',
        'google/tapas-base-finetuned-wikisql-supervised',
        'google/tapas-base-finetuned-tabfact',
    )
}
class __lowerCamelCase ( __a ):
    """Configuration class for TAPAS (table question answering) models.

    Holds the BERT backbone hyperparameters plus the fine-tuning and
    aggregation-head hyperparameters.

    NOTE(review): the original ``__init__`` declared every parameter under the
    same mangled name (a SyntaxError) and passed the undefined ``_A`` around.
    The parameter list below is reconstructed positionally from the attribute
    assignments in the body, keeping the original default values in order.
    """

    # Model-type identifier (kept as the original attribute name).
    snake_case__ = "tapas"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-1_2,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss
        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index
        # JSON round-trips turn integer dict keys into strings; normalize back.
        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 61 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__ ( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ) -> dict:
    """Build the kwargs dict fed to a Flax Blenderbot model in the tests.

    Any mask left as ``None`` is derived from the ids (1 for real tokens,
    0 for padding, judged against ``config.pad_token_id``) or from the
    layer/head counts in ``config``.

    Fixes vs. the original: the parameter list used one duplicated mangled
    name (a SyntaxError), and the freshly computed ``decoder_attention_mask``
    was discarded — the *encoder* mask was returned under the decoder key.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id , 1 , 0 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
    # Head masks default to "keep every head" (all ones).
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads) )
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads) )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # Previously returned attention_mask here by mistake.
        "decoder_attention_mask": decoder_attention_mask,
    }
class lowerCamelCase_ :
    """Test harness that builds a tiny Blenderbot config plus inputs and checks
    the Flax decoder cache (``init_cache`` + incremental ``decode``) against a
    single full-sequence decode.

    NOTE(review): locals in this file appear machine-mangled — values are
    assigned to ``UpperCAmelCase__`` but later read via their intended names
    (``input_ids``, ``config``, ``model``, ``outputs`` …), and ``__init__``
    repeats the parameter name ``_A`` (a SyntaxError as written). Verify
    against the upstream transformers test before executing.
    """

    def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
        """Store the hyperparameters of the tiny model used by the tests."""
        UpperCAmelCase__ : Union[str, Any] = parent
        UpperCAmelCase__ : str = batch_size
        UpperCAmelCase__ : Dict = seq_length
        UpperCAmelCase__ : str = is_training
        UpperCAmelCase__ : int = use_labels
        UpperCAmelCase__ : Union[str, Any] = vocab_size
        UpperCAmelCase__ : Union[str, Any] = hidden_size
        UpperCAmelCase__ : int = num_hidden_layers
        UpperCAmelCase__ : Any = num_attention_heads
        UpperCAmelCase__ : List[str] = intermediate_size
        UpperCAmelCase__ : str = hidden_act
        UpperCAmelCase__ : str = hidden_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
        UpperCAmelCase__ : int = eos_token_id
        UpperCAmelCase__ : Optional[int] = pad_token_id
        UpperCAmelCase__ : List[str] = bos_token_id
        UpperCAmelCase__ : Union[str, Any] = initializer_range

    def lowercase_ ( self : Any ):
        """Build a tiny BlenderbotConfig plus matching encoder/decoder inputs."""
        # Random ids clipped to [3, vocab_size) so special ids (0/1/2) stay free,
        # then an EOS (id 2) is appended to every row.
        UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
        UpperCAmelCase__ : List[Any] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
        UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
        return config, inputs_dict

    def lowercase_ ( self : Union[str, Any] ):
        """Same as above; kept separate to mirror the common-test interface."""
        UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
        """Check that cached incremental decoding matches a full decode.

        Decodes all-but-last token with a fresh cache, then the last token
        from the cache, and compares the final logits to a one-shot decode.
        """
        UpperCAmelCase__ : List[str] = 20
        UpperCAmelCase__ : int = model_class_name(_A )
        UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
        UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        # All-ones mask over the full max length; position ids for the prefix.
        UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        UpperCAmelCase__ : str = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase__ : str = model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase__ : Tuple = model.decode(
            decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
        UpperCAmelCase__ : int = model.decode(_A , _A )
        # Compare the last position's first 5 logits between cached and full decode.
        UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
        """Same cache check as above, but with an explicit (padded) decoder
        attention mask extended to the full max decoder length."""
        UpperCAmelCase__ : Tuple = 20
        UpperCAmelCase__ : Optional[int] = model_class_name(_A )
        UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        # Pad the real decoder mask with zeros out to max_decoder_length.
        UpperCAmelCase__ : Any = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase__ : int = model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase__ : Any = model.decode(
            decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
        UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    """Hand-built Blenderbot head tests: logits shapes and shift_tokens_right.

    NOTE(review): same machine-mangled locals as the rest of the file —
    ``UpperCAmelCase__`` assignments are later read as ``input_ids`` /
    ``outputs`` / ``shifted`` …; verify against the upstream test before
    executing.
    """

    # Presumably vocab_size (read later as self.vocab_size) — mangled name,
    # TODO confirm.
    lowerCAmelCase__ = 9_9

    def lowercase_ ( self : Optional[int] ):
        """Return a tiny config, a fixed batch of token ids, and the batch size.

        Every row ends in EOS (id 2); one row contains padding (id 1).
        """
        UpperCAmelCase__ : List[str] = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )
        UpperCAmelCase__ : int = input_ids.shape[0]
        UpperCAmelCase__ : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def lowercase_ ( self : List[Any] ):
        """LM head output must have shape (batch, seq_len, vocab_size)."""
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
        UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
        UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
        UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , _A )

    def lowercase_ ( self : int ):
        """With explicit decoder inputs, logits follow the decoder's length."""
        UpperCAmelCase__ : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
        UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
        UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , _A )

    def lowercase_ ( self : Dict ):
        """shift_tokens_right must keep shape, reduce pad count by one, and
        place the decoder start token (id 2) in column 0."""
        UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
        # Count pad tokens (id 1) before and after the shift.
        UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum()
        UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(_A , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
    """Common Flax Blenderbot model tests: cache usage, jitted encode/decode
    shape parity, from_pretrained smoke test, and a slow 3B generation check.

    NOTE(review): same machine-mangled locals as the rest of the file, and the
    three ``lowerCAmelCase__`` class attributes below shadow each other (only
    the last assignment survives) — presumably ``is_encoder_decoder`` /
    ``all_model_classes`` / ``all_generative_model_classes`` upstream; verify
    before executing.
    """

    lowerCAmelCase__ = True
    lowerCAmelCase__ = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def lowercase_ ( self : Tuple ):
        """Attach the shared model tester."""
        UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )

    def lowercase_ ( self : List[Any] ):
        """Cached decoding must match full decoding for every model class."""
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_A , _A , _A )

    def lowercase_ ( self : List[Any] ):
        """Same as above but with an explicit decoder attention mask."""
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )

    def lowercase_ ( self : Optional[int] ):
        """Jitted and non-jitted encode must return identically shaped outputs."""
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
                UpperCAmelCase__ : str = model_class(_A )

                @jax.jit
                def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
                    return model.encode(input_ids=_A , attention_mask=_A )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def lowercase_ ( self : Tuple ):
        """Jitted and non-jitted decode must return identically shaped outputs."""
        UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : List[str] = model_class(_A )
                UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                UpperCAmelCase__ : Tuple = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
                    return model.decode(
                        decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def lowercase_ ( self : List[str] ):
        """Smoke test: pretrained 400M checkpoint loads and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase__ : Union[str, Any] = model(_A )
            self.assertIsNotNone(_A )

    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def lowercase_ ( self : Dict ):
        """End-to-end generation with the 3B checkpoint against a fixed reply."""
        UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
        UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        UpperCAmelCase__ : Optional[Any] = ['''Sam''']
        UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
        UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
        UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
        UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
        assert generated_txt[0].strip() == tgt_text
| 75 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
snake_case_ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class SCREAMING_SNAKE_CASE__ ( __a ):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale to [0, 1], and mean/std normalization.

    NOTE(review): every method here declares all of its parameters under the
    same mangled name ``lowercase__`` (a SyntaxError as written) and passes the
    undefined ``_A`` where the real arguments belong; verify against the
    upstream processor before executing.
    """

    # Keys produced by preprocess() in the returned BatchFeature.
    _A = ["pixel_values"]

    def __init__( self , lowercase__ = True , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = True , lowercase__ = 1 / 255 , lowercase__ = True , lowercase__ = None , lowercase__ = None , lowercase__ = True , **lowercase__ , ):
        """Store default preprocessing settings (CLIP defaults when unset)."""
        super().__init__(**_A )
        # Default target: shortest edge 224, then a 224x224 center crop.
        SCREAMING_SNAKE_CASE_ : Optional[int] = size if size is not None else {'''shortest_edge''': 224}
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_A , default_to_square=_A )
        SCREAMING_SNAKE_CASE_ : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        SCREAMING_SNAKE_CASE_ : Optional[Any] = get_size_dict(_A , default_to_square=_A , param_name="crop_size" )
        SCREAMING_SNAKE_CASE_ : int = do_resize
        SCREAMING_SNAKE_CASE_ : Tuple = size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = resample
        SCREAMING_SNAKE_CASE_ : int = do_center_crop
        SCREAMING_SNAKE_CASE_ : str = crop_size
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = do_rescale
        SCREAMING_SNAKE_CASE_ : Optional[Any] = rescale_factor
        SCREAMING_SNAKE_CASE_ : List[str] = do_normalize
        SCREAMING_SNAKE_CASE_ : List[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        SCREAMING_SNAKE_CASE_ : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
        SCREAMING_SNAKE_CASE_ : Dict = do_convert_rgb

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
        """Resize so the shortest edge matches size['shortest_edge'], keeping
        the aspect ratio. Raises ValueError if that key is missing."""
        SCREAMING_SNAKE_CASE_ : Any = get_size_dict(_A , default_to_square=_A )
        if "shortest_edge" not in size:
            raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
        SCREAMING_SNAKE_CASE_ : List[Any] = get_resize_output_image_size(_A , size=size["shortest_edge"] , default_to_square=_A )
        return resize(_A , size=_A , resample=_A , data_format=_A , **_A )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Center-crop to (size['height'], size['width']); both keys required."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
        return center_crop(_A , size=(size["height"], size["width"]) , data_format=_A , **_A )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Multiply pixel values by a scale factor (typically 1/255)."""
        return rescale(_A , scale=_A , data_format=_A , **_A )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
        """Normalize with per-channel mean and std."""
        return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )

    def __lowerCamelCase ( self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
        """Run the full pipeline over one image or a batch, with per-call
        overrides falling back to the instance defaults; returns a
        BatchFeature holding 'pixel_values'."""
        SCREAMING_SNAKE_CASE_ : str = do_resize if do_resize is not None else self.do_resize
        SCREAMING_SNAKE_CASE_ : Optional[int] = size if size is not None else self.size
        SCREAMING_SNAKE_CASE_ : List[Any] = get_size_dict(_A , param_name="size" , default_to_square=_A )
        SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample
        SCREAMING_SNAKE_CASE_ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        SCREAMING_SNAKE_CASE_ : List[str] = crop_size if crop_size is not None else self.crop_size
        SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(_A , param_name="crop_size" , default_to_square=_A )
        SCREAMING_SNAKE_CASE_ : int = do_rescale if do_rescale is not None else self.do_rescale
        SCREAMING_SNAKE_CASE_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        SCREAMING_SNAKE_CASE_ : Tuple = do_normalize if do_normalize is not None else self.do_normalize
        SCREAMING_SNAKE_CASE_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
        SCREAMING_SNAKE_CASE_ : Tuple = image_std if image_std is not None else self.image_std
        SCREAMING_SNAKE_CASE_ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        SCREAMING_SNAKE_CASE_ : int = make_list_of_images(_A )
        if not valid_images(_A ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Validate that every enabled step has the parameters it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            SCREAMING_SNAKE_CASE_ : str = [convert_to_rgb(_A ) for image in images]
        # All transformations expect numpy arrays.
        SCREAMING_SNAKE_CASE_ : Dict = [to_numpy_array(_A ) for image in images]
        if do_resize:
            SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
        if do_center_crop:
            SCREAMING_SNAKE_CASE_ : List[Any] = [self.center_crop(image=_A , size=_A ) for image in images]
        if do_rescale:
            SCREAMING_SNAKE_CASE_ : Optional[int] = [self.rescale(image=_A , scale=_A ) for image in images]
        if do_normalize:
            SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
        # Convert to the requested channel layout (channels-first by default).
        SCREAMING_SNAKE_CASE_ : List[Any] = [to_channel_dimension_format(_A , _A ) for image in images]
        SCREAMING_SNAKE_CASE_ : Tuple = {'''pixel_values''': images}
        return BatchFeature(data=_A , tensor_type=_A )
| 421 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
    """Minimal Beam-based builder producing flat string examples for tests.

    Fixes vs. the original: ``datasets.BeamBasedBuilder`` looks up the hook
    methods ``_info`` / ``_split_generators`` / ``_build_pcollection`` — the
    original defined all three under the single name ``lowercase_`` (so only
    the last definition survived and none was ever called) and passed the
    undefined name ``_A`` where ``None`` and the examples belonged.
    """

    def _info(self):
        # Single string column; this dummy dataset has no supervised keys.
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        # One TRAIN split fed from the in-memory dummy examples.
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module can load without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
    """Beam-based builder with a nested feature (``a.b``: sequence of strings).

    Fixes vs. the original: restores the ``_info`` / ``_split_generators`` /
    ``_build_pcollection`` hook names expected by ``datasets.BeamBasedBuilder``
    (the original defined all three as ``lowercase_``, so only the last
    survived) and replaces the undefined ``_A`` with ``None`` / the examples.
    """

    def _info(self):
        # Nested schema: {"a": {"b": [str, ...]}}.
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=None , )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
        ]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module can load without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def a__ ( ) -> Tuple:
    """Return the (key, example) pairs consumed by the flat dummy builder."""
    pairs = []
    for index, text in enumerate(('''foo''', '''bar''', '''foobar''')):
        pairs.append((index, {"content": text}))
    return pairs
def a__ ( ) -> Optional[Any]:
    """Return the (key, example) pairs consumed by the nested dummy builder."""
    texts = ('''foo''', '''bar''', '''foobar''')
    return [(idx, {"a": {"b": [txt]}}) for idx, txt in enumerate(texts)]
class lowerCamelCase_ ( __a ):
    """Integration tests for Beam-based dataset builders (DirectRunner).

    Fixes vs. the original:
    - all four test methods shared the single name ``lowercase_`` (only the
      last definition survived); they now have distinct ``test_*`` names so
      unittest actually discovers and runs them;
    - locals were machine-mangled (assigned to ``UpperCAmelCase__`` but read
      back as ``builder`` / ``dset`` / ``expected_num_examples``, and ``_A``
      where ``tmp_cache_dir`` belonged); they are restored;
    - the sharded test asserted the ``00000`` shard twice — the second
      assertion now checks shard ``00001``.
    """

    @require_beam
    def test_download_and_prepare(self):
        """End-to-end download_and_prepare + as_dataset on the flat builder."""
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            # The prepared arrow file lands under <cache>/<name>/default/0.0.0/.
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        """Patch the parquet writer to force 2 shards and check both exist."""
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                # Delegate to the real writer but force num_shards=2.
                write_parquet_mock.side_effect = partial(original_write_parquet , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            # Second shard (the original erroneously re-checked 00000 here).
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00001-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def test_no_beam_options(self):
        """Without a beam_runner, preparation must fail with MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def test_nested_features(self):
        """Same end-to-end check as the flat case, for the nested builder."""
        expected_num_examples = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            dset = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , expected_num_examples )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , expected_num_examples )
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset
| 75 | 0 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lowerCamelCase ( lowerCamelCase : str):
return np.maximum(0 , lowerCAmelCase__)
if __name__ == "__main__":
    # Bug fix: this module defines the ReLU as ``lowerCamelCase``; the old call
    # target ``relu`` did not exist and raised NameError.
    print(np.array(lowerCamelCase([-1, 0, 5])))  # --> [0, 0, 5]
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def a__ ( ):
    """Prompt the user for the compute environment and return the gathered config.

    Bug fixes: the branch condition read the undefined name ``compute_environment``
    and the function returned the undefined name ``config``; both locals are now
    actually bound. The ``-> List[str]`` annotation was dropped — ``List`` is not
    imported in this file and would raise NameError at definition time.
    """
    compute_environment = _ask_options(
        'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def a__ ( lowerCAmelCase__=None ):
    """Build the ``accelerate config`` argument parser.

    Args:
        lowerCAmelCase__: optional argparse sub-parsers collection; when given,
            ``config`` is registered as a sub-command, otherwise a stand-alone
            parser is created.

    Bug fixes: the body referenced the undefined names ``subparsers`` and
    ``parser``; ``description=`` and ``default=`` mistakenly received the
    sub-parsers argument instead of the module description / ``None``.
    """
    subparsers = lowerCAmelCase__
    if subparsers is not None:
        parser = subparsers.add_parser('config' , description=UpperCamelCase__ )
    else:
        parser = argparse.ArgumentParser('Accelerate config command' , description=UpperCamelCase__ )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        # NOTE(review): upstream sets ``func=config_command`` here; in this file
        # that function was renamed to ``a__``, so no correct referent exists and
        # the original (broken) default is left as-is — TODO restore the name.
        parser.set_defaults(func=lowerCAmelCase__ )
    return parser
def a__ ( lowerCAmelCase__ ):
    """Run the config command: gather a config interactively and write it to disk.

    Args:
        lowerCAmelCase__: parsed argparse namespace with a ``config_file`` attribute.

    Bug fixes: rebound the undefined locals (``args``, ``config``,
    ``config_file``) and restored ``cache_dir`` (imported from ``.config_args``
    at the top of this file) as the directory that gets created.
    """
    args = lowerCAmelCase__
    # NOTE(review): upstream calls the prompt function ``get_user_input``; in this
    # file it was renamed to ``a__``, so this name is unresolved — TODO restore it.
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('.json' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"""accelerate configuration saved at {config_file}""" )
def a__ ( ):
    """CLI entry point: build the parser, parse argv, and run the config command.

    Bug fixes: the body used the undefined locals ``parser``/``lowerCAmelCase__``,
    and the ``__main__`` guard called the undefined name ``main``. After this
    module executes, ``a__`` resolves to this entry function, so the guard now
    calls ``a__()``.
    """
    # NOTE(review): ``config_command_parser`` and ``config_command`` are
    # unresolved in this file (those defs were all renamed to ``a__``) — TODO
    # restore the original function names file-wide.
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )
if __name__ == "__main__":
    a__()
| 75 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowercase_ :
    """Synthetic regression dataset: y = a*x + b + N(0, 0.1) noise.

    Bug fixes: the obfuscated ``__init__`` declared every parameter as ``__A``
    (duplicate argument names are a SyntaxError) and never assigned
    ``self.length``/``self.x``/``self.y`` that the other methods read. The bogus
    ``Optional``/``Dict`` return annotations (``typing`` is not imported here)
    were removed, and ``np.floataa`` was restored to ``np.float32``.
    """

    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        # Seeded generator so runs are reproducible when ``seed`` is given.
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )

    def __len__( self ):
        return self.length

    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}
class lowercase_ ( torch.nn.Module ):
    """Tiny regression model whose parameters are fixed 2-vectors ``[2, 3]``.

    Bug fixes: the obfuscated ``__init__`` declared all parameters as ``__A``
    (duplicate names are a SyntaxError) and lost the ``self.a``/``self.b``/
    ``self.first_batch`` assignment targets that ``_snake_case`` reads; the
    forward body also referenced ``x``, so the parameter is named ``x`` again.
    ``double_output`` is accepted but unused, mirroring the stored state.
    """

    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        # Parameters are deliberately constant [2, 3] vectors (a/b args unused here).
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True

    def _snake_case ( self , x=None ):
        if self.first_batch:
            # One-time dtype report, printed on the first batch only.
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class lowercase_ ( torch.nn.Module ):
    """Tiny scalar regression model computing ``x * a + b``.

    Bug fixes: duplicate ``__A`` parameters (SyntaxError) renamed to
    ``a``/``b``/``double_output``; ``torch.tensor(_A)`` referenced an undefined
    name and now wraps the actual constructor arguments; the lost
    ``self.a``/``self.b``/``self.first_batch`` targets are restored.
    """

    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True

    def _snake_case ( self , x=None ):
        if self.first_batch:
            # One-time dtype report, printed on the first batch only.
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a + self.b
def SCREAMING_SNAKE_CASE_ ( accelerator , batch_size = 16 ):
    """Build MRPC train/eval dataloaders from the bundled CSV test samples.

    Args:
        accelerator: Accelerate ``Accelerator``; only its ``distributed_type``
            is read, to pick a TPU-friendly padding strategy.
        batch_size: kept for signature compatibility; the dataloaders below use
            fixed batch sizes 2 (train) and 1 (eval) as in the upstream test.

    Returns:
        ``(train_dataloader, eval_dataloader)``.

    Bug fixes: the obfuscated signature declared both parameters as
    ``UpperCAmelCase_`` (duplicate argument names are a SyntaxError) and the
    body referenced many undefined names (``tokenizer``, ``datasets``,
    ``label_to_id``, ``outputs``, ``tokenize_function``, boolean flags, ...);
    all locals are rebound and the flags restored per the upstream example.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv' , data_files=data_files )
    label_list = datasets['train'].unique('label' )
    label_to_id = {v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None , padding='max_length' )
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['sentence1', 'sentence2', 'label'] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
| 443 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch checkpoint.

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: optional JSON config path; '' means default GPT2 config.
        pytorch_dump_folder_path: output folder for the weights and config files.

    Bug fixes: the obfuscated signature declared all three parameters as
    ``lowerCAmelCase__`` (duplicate argument names are a SyntaxError); the body
    referenced the undefined names ``config``/``model`` and saved to the wrong
    paths. The stray ``-> int`` annotation (the function returns None) was removed.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # Bug fixes: ``parser``/``args`` were undefined (both assignments went to the
    # same obfuscated name), the argparse destination attributes use the flag
    # spelling (``gpt2_...``, not ``gpta_...``), and the converter in this file
    # is named ``a__`` — ``convert_gpta_checkpoint_to_pytorch`` did not exist.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    a__(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 75 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class __UpperCAmelCase ( unittest.TestCase ):
    """Tests for ChineseCLIPProcessor: tokenizer/image-processor wiring and
    save/load round-trips.

    Bug fixes: the obfuscated copy named *every* method ``snake_case_`` (later
    defs shadowed earlier ones) while the bodies still called
    ``self.get_tokenizer()`` etc., and used the undefined name ``_A`` for
    kwargs, expected classes, and file names. Method names are restored to the
    upstream ones so the internal call sites (and unittest's ``setUp``/
    ``tearDown``/``test_*`` discovery) resolve again.
    """

    def setUp(self ):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            '的',
            '价',
            '格',
            '是',
            '15',
            '便',
            'alex',
            '##andra',
            ',',
            '。',
            '-',
            't',
            'shirt',
        ]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

        image_processor_map = {
            'do_resize': True,
            'size': {'height': 224, 'width': 224},
            'do_center_crop': True,
            'crop_size': {'height': 18, 'width': 18},
            'do_normalize': True,
            'image_mean': [0.48145466, 0.4578275, 0.40821073],
            'image_std': [0.26862954, 0.26130258, 0.27577711],
            'do_convert_rgb': True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )

    def get_tokenizer(self , **kwargs ):
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_rust_tokenizer(self , **kwargs ):
        return BertTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor(self , **kwargs ):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown(self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs(self ):
        """Create a list with a single random channels-last PIL image."""
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )

        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )

        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )

    def test_save_load_pretrained_additional_features(self ):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)" , sep_token="(SEP)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False )

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="(CLS)" , sep_token="(SEP)" , do_normalize=False )

        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )

        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )

    def test_image_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'Alexandra,T-shirt的价格是15便士。'

        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )

        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()

    def test_tokenizer_decode(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )

        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names(self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = ChineseCLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'Alexandra,T-shirt的价格是15便士。'
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 99 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
    # Model tester for TFLayoutLMv3: builds a tiny config plus dummy
    # text/bbox/image inputs and checks output shapes for each task head.
    #
    # NOTE(review): every parameter of every method below is named ``_A`` —
    # duplicate argument names are a SyntaxError in Python, so this class cannot
    # execute as written. The bodies reference the intended (now-lost) parameter
    # names (``parent``, ``batch_size``, ``config``, ``input_ids``, ...); code is
    # left byte-identical pending reconstruction of the real signatures.
    def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
        '''Store tiny hyper-parameters used to build configs and dummy inputs.'''
        UpperCAmelCase__ : int = parent
        UpperCAmelCase__ : Optional[int] = batch_size
        UpperCAmelCase__ : str = num_channels
        UpperCAmelCase__ : str = image_size
        UpperCAmelCase__ : List[str] = patch_size
        UpperCAmelCase__ : Any = is_training
        UpperCAmelCase__ : List[str] = use_input_mask
        UpperCAmelCase__ : Tuple = use_token_type_ids
        UpperCAmelCase__ : str = use_labels
        UpperCAmelCase__ : int = vocab_size
        UpperCAmelCase__ : List[Any] = hidden_size
        UpperCAmelCase__ : Optional[int] = num_hidden_layers
        UpperCAmelCase__ : List[str] = num_attention_heads
        UpperCAmelCase__ : Tuple = intermediate_size
        UpperCAmelCase__ : Dict = hidden_act
        UpperCAmelCase__ : int = hidden_dropout_prob
        UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
        UpperCAmelCase__ : List[str] = max_position_embeddings
        UpperCAmelCase__ : Tuple = type_vocab_size
        UpperCAmelCase__ : Any = type_sequence_label_size
        UpperCAmelCase__ : List[str] = initializer_range
        UpperCAmelCase__ : List[str] = coordinate_size
        UpperCAmelCase__ : Tuple = shape_size
        UpperCAmelCase__ : Optional[int] = num_labels
        UpperCAmelCase__ : Optional[Any] = num_choices
        UpperCAmelCase__ : Union[str, Any] = scope
        UpperCAmelCase__ : Optional[Any] = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        UpperCAmelCase__ : str = text_seq_length
        UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
        UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
    def lowercase_ ( self : Union[str, Any] ):
        '''Build a tiny LayoutLMv3 config plus random ids/bbox/pixel inputs.'''
        UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        UpperCAmelCase__ : int = bbox.numpy()
        # Ensure that bbox is legal
        # (swap coordinates so x0 <= x1 and y0 <= y1 for every box)
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCAmelCase__ : str = bbox[i, j, 3]
                    UpperCAmelCase__ : Dict = bbox[i, j, 1]
                    UpperCAmelCase__ : str = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
                    UpperCAmelCase__ : Any = bbox[i, j, 0]
                    UpperCAmelCase__ : List[Any] = tmp_coordinate
        UpperCAmelCase__ : str = tf.constant(_A )
        UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase__ : Any = None
        if self.use_input_mask:
            UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
        UpperCAmelCase__ : Any = None
        if self.use_token_type_ids:
            UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        UpperCAmelCase__ : Optional[int] = None
        UpperCAmelCase__ : List[str] = None
        if self.use_labels:
            UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
        '''Run the base model on text+image, text-only and image-only inputs and check hidden-state shapes.'''
        UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
        # text + image
        UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
        UpperCAmelCase__ : Tuple = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
        UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        UpperCAmelCase__ : Any = model(_A , training=_A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
        '''Check the sequence-classification head returns (batch, num_labels) logits.'''
        UpperCAmelCase__ : Optional[int] = self.num_labels
        UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
        UpperCAmelCase__ : Union[str, Any] = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
        '''Check the token-classification head returns (batch, text_seq_length, num_labels) logits.'''
        UpperCAmelCase__ : List[Any] = self.num_labels
        UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
        UpperCAmelCase__ : Optional[int] = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
        '''Check the question-answering head returns start/end logits of shape (batch, seq_length).'''
        UpperCAmelCase__ : str = 2
        UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
        UpperCAmelCase__ : str = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowercase_ ( self : Tuple ):
        '''Re-pack prepare_config_and_inputs() into (config, inputs_dict) for the common tests.'''
        UpperCAmelCase__ : int = self.prepare_config_and_inputs()
        # NOTE(review): annotated assignment to a parenthesized tuple target is
        # itself invalid syntax — left as-is, same caveat as the signatures above.
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
        UpperCAmelCase__ : List[Any] = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    # Common TF model tests for LayoutLMv3 (model tester + pipeline mixin).
    #
    # NOTE(review): as in the tester class above, all method parameters are the
    # duplicated name ``_A`` (a SyntaxError) and many locals were collapsed to
    # ``UpperCAmelCase__`` while later lines read the intended upstream names
    # (``model``, ``inputs_dict``, ``prepared_for_class``, ...). Also note the
    # base classes are both the placeholder ``__a``. Code is left byte-identical;
    # comments below describe the intended behavior of each test.
    lowerCAmelCase__ = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    lowerCAmelCase__ = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    # Pipeline-skip hook: unconditionally skips pipeline tests for this model.
    def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
        '''simple docstring'''
        return True
    # Expand inputs for multiple-choice models and attach dummy labels per head.
    def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
        '''simple docstring'''
        UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
        if model_class in get_values(_A ):
            UpperCAmelCase__ : Tuple = {
                k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(_A , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(_A ):
                UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : int = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    # setUp: create the model tester and a ConfigTester for LayoutLMvaConfig.
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
        UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
    def lowercase_ ( self : str ):
        '''Run the shared config sanity tests.'''
        self.config_tester.run_common_tests()
    # Loss-computation test: verifies hf_compute_loss works with kwargs, masked
    # labels (-100), a dict, and a positional tuple of inputs.
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ : Optional[Any] = model_class(_A )
            if getattr(_A , '''hf_compute_loss''' , _A ):
                # The number of elements in the loss should be the same as the number of elements in the label
                UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : List[Any] = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
                ]
                UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
                UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        UpperCAmelCase__ : Any = -100
                        UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
                        UpperCAmelCase__ : int = model(_A , **_A )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : Dict = model(_A )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                # Get keys that were added with the _prepare_for_class function
                UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
                UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
                UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                UpperCAmelCase__ : Dict = {0: '''input_ids'''}
                for label_key in label_keys:
                    UpperCAmelCase__ : str = signature_names.index(_A )
                    UpperCAmelCase__ : List[Any] = label_key
                UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                UpperCAmelCase__ : Tuple = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    UpperCAmelCase__ : Any = prepared_for_class[value]
                UpperCAmelCase__ : Tuple = tuple(_A )
                # Send to model
                UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    # Base-model forward test.
    # NOTE(review): the parenthesized tuple targets with ``:`` annotations below
    # are invalid syntax — left byte-identical, same caveat as the signatures.
    def lowercase_ ( self : int ):
        '''simple docstring'''
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
    # Same forward test for each position-embedding type.
    def lowercase_ ( self : Tuple ):
        '''simple docstring'''
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase__ : Union[str, Any] = type
            self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
    # Sequence-classification head test.
    def lowercase_ ( self : List[str] ):
        '''simple docstring'''
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            _A , _A , _A , _A , _A , _A , _A )
    # Token-classification head test.
    def lowercase_ ( self : Any ):
        '''simple docstring'''
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            _A , _A , _A , _A , _A , _A , _A )
    # Question-answering head test.
    def lowercase_ ( self : Optional[int] ):
        '''simple docstring'''
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            _A , _A , _A , _A , _A , _A , _A )
    # Slow test: load the first pretrained checkpoint from the hub.
    @slow
    def lowercase_ ( self : List[Any] ):
        '''simple docstring'''
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
def a__ ( ):
    """Load the COCO fixture image used by the integration test below.

    Bug fix: the opened image was bound to an obfuscated name while the
    function returned the undefined name ``image``; the wrong ``-> List[str]``
    annotation (a PIL image is returned) was removed.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test for TFLayoutLMv3 against the pretrained base model.

    Bug fixes: the cached property was renamed back to
    ``default_image_processor`` so the ``self.default_image_processor`` read in
    the test resolves; the fixture loader is called via its in-file name
    ``a__`` (``prepare_img`` does not exist here); and the undefined ``_A``
    placeholders are replaced with the real locals/flags.
    """

    @cached_property
    def default_image_processor ( self ):
        # OCR disabled: the test supplies its own token ids and boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def lowercase_ ( self : int ):
        """Forward pass on the fixture image; checks output shape and a logits slice."""
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )

        image_processor = self.default_image_processor
        image = a__()
        pixel_values = image_processor(images=image , return_tensors='tf' ).pixel_values

        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )

        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 75 | 0 |
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
# Module-level logger used by the length warnings in the pipeline subclasses below.
lowercase = logging.get_logger(__name__)
class __lowerCamelCase ( enum.Enum ):
    """Return-type selector for the generation pipeline (raw tensors vs. decoded text).

    NOTE(review): obfuscation gave both members one name; ``enum`` forbids
    reusing a member key, so this class raises at import time — kept as-is.
    """
    snake_case__ : List[str] = 0
    snake_case__ : Any = 1
@add_end_docstrings(__a )
class __lowerCamelCase ( __a ):
    """Sequence-to-sequence text-generation pipeline.

    NOTE(review): machine-obfuscated code.  Several signatures reuse one
    parameter name (a SyntaxError), and values are assigned to a reused
    name while later lines read the upstream names (``preprocess_params``,
    ``inputs``, ``output_ids`` ...).  Code is kept byte-identical; only
    comments/docstrings were added.
    """
    # Prefix of the keys in the returned records ("generated_text", "generated_token_ids").
    snake_case__ : Any = '''generated'''

    def __init__( self , *a__ , **a__ ):
        super().__init__(*_A , **_A )
        # Restrict to the seq2seq model classes registered for the active framework.
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )

    def a_ ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , **a__ , ):
        """Split call-time options into preprocess / forward / postprocess kwargs."""
        __SCREAMING_SNAKE_CASE : Optional[Any] = {}
        if truncation is not None:
            __SCREAMING_SNAKE_CASE : Union[str, Any] = truncation
        __SCREAMING_SNAKE_CASE : Any = generate_kwargs
        __SCREAMING_SNAKE_CASE : Optional[Any] = {}
        if return_tensors is not None and return_type is None:
            __SCREAMING_SNAKE_CASE : Any = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            __SCREAMING_SNAKE_CASE : Tuple = return_type
        if clean_up_tokenization_spaces is not None:
            __SCREAMING_SNAKE_CASE : List[str] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            # Tokenize the stop sequence; only its first token can be honored.
            __SCREAMING_SNAKE_CASE : Any = self.tokenizer.encode(_A , add_special_tokens=_A )
            if len(_A ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            __SCREAMING_SNAKE_CASE : Union[str, Any] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def a_ ( self , a__ , a__ , a__ ):
        """Input-length check hook; subclasses override to warn — the base accepts everything."""
        return True

    def a_ ( self , *a__ , a__ ):
        """Tokenize the inputs, prepending the model's configured prefix when defined."""
        __SCREAMING_SNAKE_CASE : Optional[int] = self.model.config.prefix if self.model.config.prefix is not None else ''''''
        if isinstance(args[0] , _A ):
            # Batch input requires a pad token to build rectangular tensors.
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
            __SCREAMING_SNAKE_CASE : Tuple = ([prefix + arg for arg in args[0]],)
            __SCREAMING_SNAKE_CASE : Dict = True
        elif isinstance(args[0] , _A ):
            __SCREAMING_SNAKE_CASE : List[str] = (prefix + args[0],)
            __SCREAMING_SNAKE_CASE : Dict = False
        else:
            raise ValueError(
                f' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`' )
        __SCREAMING_SNAKE_CASE : List[str] = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__( self , *a__ , **a__ ):
        """Run the pipeline; unwrap single-candidate results for list inputs."""
        __SCREAMING_SNAKE_CASE : Union[str, Any] = super().__call__(*_A , **_A )
        if (
            isinstance(args[0] , _A )
            and all(isinstance(_A , _A ) for el in args[0] )
            and all(len(_A ) == 1 for res in result )
        ):
            return [res[0] for res in result]
        return result

    def a_ ( self , a__ , a__=TruncationStrategy.DO_NOT_TRUNCATE , **a__ ):
        """Preprocess step: tokenize with the given truncation strategy."""
        __SCREAMING_SNAKE_CASE : str = self._parse_and_tokenize(_A , truncation=_A , **_A )
        return inputs

    def a_ ( self , a__ , **a__ ):
        """Forward step: run ``generate`` and reshape ids to (batch, candidates, seq)."""
        if self.framework == "pt":
            __SCREAMING_SNAKE_CASE : Optional[int] = model_inputs['''input_ids'''].shape
        elif self.framework == "tf":
            __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.shape(model_inputs["input_ids"] ).numpy()
        __SCREAMING_SNAKE_CASE : str = generate_kwargs.get("min_length" , self.model.config.min_length )
        __SCREAMING_SNAKE_CASE : Optional[Any] = generate_kwargs.get("max_length" , self.model.config.max_length )
        self.check_inputs(_A , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
        __SCREAMING_SNAKE_CASE : int = self.model.generate(**_A , **_A )
        __SCREAMING_SNAKE_CASE : List[Any] = output_ids.shape[0]
        if self.framework == "pt":
            __SCREAMING_SNAKE_CASE : str = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
        elif self.framework == "tf":
            __SCREAMING_SNAKE_CASE : Union[str, Any] = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
        return {"output_ids": output_ids}

    def a_ ( self , a__ , a__=ReturnType.TEXT , a__=False ):
        """Postprocess step: decode ids to text, or pass raw token ids through."""
        __SCREAMING_SNAKE_CASE : Optional[Any] = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                __SCREAMING_SNAKE_CASE : Any = {f'{self.return_name}_token_ids': output_ids}
            elif return_type == ReturnType.TEXT:
                __SCREAMING_SNAKE_CASE : List[str] = {
                    f'{self.return_name}_text': self.tokenizer.decode(
                        _A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
                }
            records.append(_A )
        return records
@add_end_docstrings(__a )
class __lowerCamelCase ( __a ):
    """Summarization variant of the generation pipeline; result keys use ``summary``."""
    snake_case__ : str = '''summary'''

    def __call__( self , *a__ , **a__ ):
        return super().__call__(*_A , **_A )

    def a_ ( self , a__ , a__ , a__ ):
        """Warn about suspicious length settings for a summarization task."""
        if max_length < min_length:
            logger.warning(f'Your min_length={min_length} must be inferior than your max_length={max_length}.' )
        # Summaries are normally shorter than the input; hint at lowering max_length.
        if input_length < max_length:
            logger.warning(
                f'Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f'consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})' )
@add_end_docstrings(__a )
class __lowerCamelCase ( __a ):
    """Translation variant of the generation pipeline; result keys use ``translation``."""
    snake_case__ : List[str] = '''translation'''

    def a_ ( self , a__ , a__ , a__ ):
        """Warn when the input already consumes most of ``max_length``."""
        if input_length > 0.9 * max_length:
            logger.warning(
                f'Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '
                "increasing your max_length manually, e.g. translator(\'...\', max_length=400)" )
        return True

    def a_ ( self , *a__ , a__=TruncationStrategy.DO_NOT_TRUNCATE , a__=None , a__=None ):
        """Tokenize, delegating to the tokenizer's translation helper when it exists."""
        if getattr(self.tokenizer , "_build_translation_inputs" , _A ):
            return self.tokenizer._build_translation_inputs(
                *_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
        else:
            return super()._parse_and_tokenize(*_A , truncation=_A )

    def a_ ( self , a__=None , a__=None , **a__ ):
        """Extend parameter sanitation with src/tgt languages, falling back to the task name."""
        __SCREAMING_SNAKE_CASE : Dict = super()._sanitize_parameters(**_A )
        if src_lang is not None:
            __SCREAMING_SNAKE_CASE : int = src_lang
        if tgt_lang is not None:
            __SCREAMING_SNAKE_CASE : Union[str, Any] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            __SCREAMING_SNAKE_CASE : List[Any] = kwargs.get("task" , self.task )
            __SCREAMING_SNAKE_CASE : int = task.split("_" )
            if task and len(_A ) == 4:
                # translation, XX, to YY
                __SCREAMING_SNAKE_CASE : Any = items[1]
                __SCREAMING_SNAKE_CASE : Optional[int] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__( self , *a__ , **a__ ):
        return super().__call__(*_A , **_A )
| 211 |
'''simple docstring'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    # NOTE(review): obfuscated script — every result is bound to the same
    # reused name while later lines read ``query``/``url``/``res``/``link``;
    # kept byte-identical, comments only.
    # Query comes from CLI args (joined with an escaped space) or from stdin.
    UpperCamelCase__ = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    UpperCamelCase__ = F"""https://www.google.com/search?q={query}&num=100"""
    # Fetch the results page with a randomized User-Agent header.
    UpperCamelCase__ = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        # Standard layout: href of the first organic result container.
        UpperCamelCase__ = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        # Fallback layout: href is a redirect URL carrying the target in its ``url=`` query param.
        UpperCamelCase__ = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
| 75 | 0 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_UpperCamelCase, _UpperCamelCase, _UpperCamelCase = False, False, False
@dataclass
class _lowerCamelCase :
    """Audio feature (obfuscated copy of ``datasets.features.Audio``).

    Encodes examples into an Arrow ``{"bytes", "path"}`` struct and decodes
    them back into ``{"path", "array", "sampling_rate"}``.
    NOTE(review): machine-obfuscated code — values are assigned to a reused
    name while later lines read the upstream names (``buffer``, ``path``,
    ``array`` ...); code is kept byte-identical, comments only.
    """
    # Field defaults below mirror upstream Audio (sampling_rate, mono, decode, id) — TODO confirm.
    UpperCAmelCase_ : List[str] =None
    UpperCAmelCase_ : Dict =True
    UpperCAmelCase_ : List[Any] =True
    UpperCAmelCase_ : Optional[Any] =None
    # Automatically constructed
    UpperCAmelCase_ : str ="dict"
    UpperCAmelCase_ : Any =pa.struct({"bytes": pa.binary(), "path": pa.string()} )
    UpperCAmelCase_ : str =field(default="Audio" , init=__a , repr=__a )

    def __call__( self ) -> Optional[Any]:
        """Return the underlying Arrow storage type."""
        return self.pa_type

    def UpperCAmelCase ( self , UpperCAmelCase ) -> int:
        """Encode one example (path str / raw bytes / dict) into ``{"bytes", "path"}`` storage."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install \'soundfile\'." ) from err
        if isinstance(_A , _A ):
            return {"bytes": None, "path": value}
        elif isinstance(_A , _A ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            __snake_case : Optional[int] = BytesIO()
            sf.write(_A , value["array"] , value["sampling_rate"] , format="wav" )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm" ):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate" ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a \'sampling_rate\' in Audio object" )
                if value.get("bytes" ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    __snake_case : Dict = np.frombuffer(value["bytes"] , dtype=np.intaa ).astype(np.floataa ) / 32767
                else:
                    __snake_case : Optional[int] = np.memmap(value["path"] , dtype="h" , mode="r" ).astype(np.floataa ) / 32767
                __snake_case : Tuple = BytesIO(bytes() )
                sf.write(_A , _A , value["sampling_rate"] , format="wav" )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path" )}
        elif value.get("bytes" ) is not None or value.get("path" ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes" ), "path": value.get("path" )}
        else:
            raise ValueError(
                F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )

    def UpperCAmelCase ( self , UpperCAmelCase , UpperCAmelCase = None ) -> str:
        """Decode stored audio into ``{"path", "array", "sampling_rate"}`` (optionally mono/resampled)."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead." )
        __snake_case : List[str] = (value['''path'''], BytesIO(value["bytes"] )) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install \'librosa\' and \'soundfile\'." ) from err
        # Infer the container format from the file extension, if a path is known.
        __snake_case : Optional[Any] = xsplitext(_A )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. " )
        if file is None:
            # No in-memory bytes: stream from the path, resolving a per-repo auth token when possible.
            __snake_case : Tuple = token_per_repo_id or {}
            __snake_case : Optional[Any] = path.split("::" )[-1]
            try:
                __snake_case : int = string_to_dict(_A , config.HUB_DATASETS_URL )['''repo_id''']
                __snake_case : Union[str, Any] = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                __snake_case : List[str] = None
            with xopen(_A , "rb" , use_auth_token=_A ) as f:
                __snake_case : str = sf.read(_A )
        else:
            __snake_case : str = sf.read(_A )
        __snake_case : int = array.T
        if self.mono:
            __snake_case : str = librosa.to_mono(_A )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            __snake_case : Optional[Any] = librosa.resample(_A , orig_sr=_A , target_sr=self.sampling_rate )
            __snake_case : int = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def UpperCAmelCase ( self ) -> Tuple:
        """Flatten to the raw storage features; only valid when decoding is disabled."""
        from .features import Value
        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature." )
        return {
            "bytes": Value("binary" ),
            "path": Value("string" ),
        }

    def UpperCAmelCase ( self , UpperCAmelCase ) -> List[str]:
        """Cast arbitrary Arrow storage (string / binary / struct) to the audio struct type."""
        if pa.types.is_string(storage.type ):
            # Strings are treated as paths; bytes column filled with nulls.
            __snake_case : Optional[Any] = pa.array([None] * len(_A ) , type=pa.binary() )
            __snake_case : List[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            # Binary is raw audio bytes; path column filled with nulls.
            __snake_case : Optional[int] = pa.array([None] * len(_A ) , type=pa.string() )
            __snake_case : int = pa.StructArray.from_arrays([storage, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices("array" ):
            # Structs carrying a decoded "array" are re-encoded example by example.
            __snake_case : List[Any] = pa.array([Audio().encode_example(_A ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("bytes" ) >= 0:
                __snake_case : Optional[Any] = storage.field("bytes" )
            else:
                __snake_case : Any = pa.array([None] * len(_A ) , type=pa.binary() )
            if storage.type.get_field_index("path" ) >= 0:
                __snake_case : List[Any] = storage.field("path" )
            else:
                __snake_case : List[str] = pa.array([None] * len(_A ) , type=pa.string() )
            __snake_case : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=storage.is_null() )
        return array_cast(_A , self.pa_type )

    def UpperCAmelCase ( self , UpperCAmelCase ) -> Optional[int]:
        """Inline file contents into the bytes column; paths are reduced to basenames."""
        @no_op_if_value_is_null
        def path_to_bytes(UpperCAmelCase ):
            # Read the referenced file fully into memory.
            with xopen(_A , "rb" ) as f:
                __snake_case : List[str] = f.read()
            return bytes_
        __snake_case : Any = pa.array(
            [
                (path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        __snake_case : Tuple = pa.array(
            [os.path.basename(_A ) if path is not None else None for path in storage.field("path" ).to_pylist()] , type=pa.string() , )
        __snake_case : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["bytes", "path"] , mask=bytes_array.is_null() )
        return array_cast(_A , self.pa_type )
| 243 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a__(img, pts_src, pts_dst, rows, cols) -> np.ndarray:
    """Warp ``img`` with the affine transform that maps ``pts_src`` onto ``pts_dst``.

    Fix vs. the original: all five parameters shared one name (a SyntaxError —
    Python forbids duplicate parameter names), and the computed transform was
    never passed to ``warpAffine``.  Names are restored from the positional
    call sites below (``get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols)``).

    :param img: source image (2-D/3-D array as accepted by OpenCV)
    :param pts_src: three source points, float32 (3, 2)
    :param pts_dst: three destination points, float32 (3, 2)
    :param rows: output width/height pair components for ``warpAffine``
    :param cols: see ``rows``
    :return: the warped image
    """
    affine_matrix = cva.getAffineTransform(pts_src, pts_dst)
    return cva.warpAffine(img, affine_matrix, (rows, cols))
if __name__ == "__main__":
    # NOTE(review): obfuscated script — every result is bound to one reused
    # name while later lines read ``image``/``gray_img``/``ptsa``/``images``
    # etc.; kept byte-identical, comments only.
    # read original image
    UpperCamelCase__ = cva.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    UpperCamelCase__ = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    UpperCamelCase__ , UpperCamelCase__ = gray_img.shape
    # set different points to rotate image
    UpperCamelCase__ = np.array([[5_0, 5_0], [2_0_0, 5_0], [5_0, 2_0_0]], np.floataa)
    UpperCamelCase__ = np.array([[1_0, 1_0_0], [2_0_0, 5_0], [1_0_0, 2_5_0]], np.floataa)
    UpperCamelCase__ = np.array([[5_0, 5_0], [1_5_0, 5_0], [1_2_0, 2_0_0]], np.floataa)
    UpperCamelCase__ = np.array([[1_0, 1_0_0], [8_0, 5_0], [1_8_0, 2_5_0]], np.floataa)
    # add all rotated images in a list
    UpperCamelCase__ = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    UpperCamelCase__ = plt.figure(1)
    UpperCamelCase__ = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
        plt.title(titles[i])
        plt.axis('''off''')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 75 | 0 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class _snake_case ( __a , unittest.TestCase ):
    """Tokenizer tests for ``moussaKam/mbarthez`` (slow + fast tokenizers).

    Fix vs. the original: a dataset-artifact token (``| 646 |``) fused onto
    the final line made the module unparseable; it was removed.
    NOTE(review): machine-obfuscated code — all methods share one name and
    locals are written to one reused name while reads use the upstream
    names; the code is otherwise kept as-is, with docstrings added.
    """
    snake_case__ = BarthezTokenizer
    snake_case__ = BarthezTokenizerFast
    snake_case__ = True
    snake_case__ = True

    def lowerCamelCase__ ( self : List[Any] ):
        """Download the fast tokenizer once and persist it into the temp dir."""
        super().setUp()
        __lowerCamelCase : List[Any] = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=_A )
        __lowerCamelCase : Dict = tokenizer

    def lowerCamelCase__ ( self : Tuple ):
        """Token <-> id round trip for the pad token."""
        __lowerCamelCase : List[Any] = '''<pad>'''
        __lowerCamelCase : Optional[int] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )

    def lowerCamelCase__ ( self : Union[str, Any] ):
        """Spot-check vocabulary contents and total size."""
        __lowerCamelCase : Any = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , "<s>" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(_A ) , 101122 )

    def lowerCamelCase__ ( self : int ):
        """``vocab_size`` must match the known mbarthez vocabulary size."""
        self.assertEqual(self.get_tokenizer().vocab_size , 101122 )

    @require_torch
    def lowerCamelCase__ ( self : Optional[int] ):
        """Batch-encode to PyTorch tensors and check shapes and ids."""
        __lowerCamelCase : Optional[Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        __lowerCamelCase : Union[str, Any] = [0, 57, 3018, 70307, 91, 2]
        __lowerCamelCase : List[str] = self.tokenizer(
            _A , max_length=len(_A ) , padding=_A , truncation=_A , return_tensors="pt" )
        self.assertIsInstance(_A , _A )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        __lowerCamelCase : Dict = batch.input_ids.tolist()[0]
        self.assertListEqual(_A , _A )

    def lowerCamelCase__ ( self : Union[str, Any] ):
        """Slow and fast tokenizers must agree on tokens and ids."""
        if not self.test_rust_tokenizer:
            return
        __lowerCamelCase : int = self.get_tokenizer()
        __lowerCamelCase : Tuple = self.get_rust_tokenizer()
        __lowerCamelCase : List[Any] = '''I was born in 92000, and this is falsé.'''
        __lowerCamelCase : Dict = tokenizer.tokenize(_A )
        __lowerCamelCase : Any = rust_tokenizer.tokenize(_A )
        self.assertListEqual(_A , _A )
        __lowerCamelCase : Optional[int] = tokenizer.encode(_A , add_special_tokens=_A )
        __lowerCamelCase : Optional[Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
        self.assertListEqual(_A , _A )
        __lowerCamelCase : Tuple = self.get_rust_tokenizer()
        __lowerCamelCase : Tuple = tokenizer.encode(_A )
        __lowerCamelCase : int = rust_tokenizer.encode(_A )
        self.assertListEqual(_A , _A )

    @slow
    def lowerCamelCase__ ( self : List[str] ):
        """Full integration check against a pinned model revision."""
        __lowerCamelCase : str = {'''input_ids''': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        __lowerCamelCase : Optional[Any] = [
            '''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
            '''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
            '''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
            '''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
            '''telles que la traduction et la synthèse de texte.''',
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=_A , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=_A , )
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels are never auto-closed or pinged as stale.
# Fix vs. the original: the list was bound to an obfuscated name while the
# consumer below reads ``LABELS_TO_EXEMPT`` (which would raise NameError).
LABELS_TO_EXEMPT = [
    '''good first issue''',
    '''good second issue''',
    '''good difficult issue''',
    '''feature request''',
    '''new model''',
    '''wip''',
]
def a__ ( ) -> List[str]:
    """Close or nag stale issues on huggingface/transformers.

    An open issue that has been quiet long enough — and carries no exempt
    label — is closed when the bot already pinged it over a week ago, or
    receives a stale-warning comment otherwise.

    Fixes vs. the original:
    * assignment targets were obfuscated to one reused name while the reads
      use ``g``/``repo``/``open_issues``/``comments``/``last_comment`` — the
      binding names are restored so the reads resolve;
    * the ``sorted`` key lambda referenced an undefined ``i`` instead of its
      own parameter;
    * ``reverse=`` was passed an undefined name; ``True`` is required so
      ``comments[0]`` is the most recent comment, as the logic below assumes.
    """
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        # Sort newest-first so comments[0] is the latest comment.
        comments = sorted(issue.get_comments() , key=lambda comment: comment.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Bot already warned and a further week passed with no activity: close.
            issue.edit(state='''closed''' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Quiet for 23+ days on a month-old issue: leave the stale warning.
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
if __name__ == "__main__":
    # Fix vs. the original: it called an undefined ``main``; the script's
    # entry point is defined above as ``a__``.
    a__()
| 75 | 0 |
'''simple docstring'''
import os
import sys
# Put the in-repo ``src`` directory on sys.path so the shim imports the local checkout.
# NOTE(review): the path is bound to an obfuscated name while the append reads
# ``SRC_DIR`` — the mismatch (a NameError at import time) is kept as-is.
__a = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
# Runtime dependencies expected by the local ``transformers`` checkout.
# NOTE(review): bound to an obfuscated name (``__a``); nothing in view reads it.
__a = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    """Forward to ``AutoConfig.from_pretrained``.

    Fix vs. the original: it declared ``*a_``/``**a_`` (duplicate parameter
    name — a SyntaxError) and forwarded an undefined name; the wrapper now
    forwards its own arguments.
    """
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    """Forward to ``AutoTokenizer.from_pretrained`` (duplicate ``a_`` parameter
    names — a SyntaxError — replaced by a proper args/kwargs pass-through)."""
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    """Forward to ``AutoModel.from_pretrained`` (duplicate ``a_`` parameter
    names — a SyntaxError — replaced by a proper args/kwargs pass-through)."""
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    """Forward to ``AutoModelForCausalLM.from_pretrained`` (duplicate ``a_``
    parameter names — a SyntaxError — replaced by a proper pass-through)."""
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    """Forward to ``AutoModelForMaskedLM.from_pretrained`` (duplicate ``a_``
    parameter names — a SyntaxError — replaced by a proper pass-through)."""
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    """Forward to ``AutoModelForSequenceClassification.from_pretrained``
    (duplicate ``a_`` parameter names — a SyntaxError — replaced by a proper
    pass-through)."""
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __UpperCAmelCase ( *args , **kwargs ):
    """Forward to ``AutoModelForQuestionAnswering.from_pretrained``.

    Fixes vs. the original: duplicate ``a_`` parameter names (a SyntaxError)
    replaced by a proper pass-through, and a dataset-artifact token
    (``| 494 |``) fused onto the return line was removed.
    """
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
# Module-level logger for the pipeline below.
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
    """Unconditional audio diffusion pipeline (UNet + scheduler).

    NOTE(review): machine-obfuscated code — results are assigned to a reused
    name while later lines read the upstream names (``sample_size``,
    ``audio``, ``original_sample_size`` ...); kept byte-identical, comments
    and docstrings only.
    """
    def __init__( self : Dict , _A : List[str] , _A : int ):
        """Register the denoising UNet and the noise scheduler."""
        super().__init__()
        self.register_modules(unet=_A , scheduler=_A )

    @torch.no_grad()
    def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
        """Generate audio clips by iterative denoising from Gaussian noise."""
        # Default duration: the UNet's native sample size at its sample rate.
        if audio_length_in_s is None:
            UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
        UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
        # The UNet halves the length once per up-block; lengths must divide evenly.
        UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        UpperCAmelCase__ : List[Any] = int(_A )
        if sample_size % down_scale_factor != 0:
            # Round the length up to the next multiple of the downscale factor;
            # the output is trimmed back after denoising.
            UpperCAmelCase__ : int = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ''' process.''' )
        UpperCAmelCase__ : Dict = int(_A )
        UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
        UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(_A , _A ) and len(_A ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        # Initial Gaussian noise sample.
        UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
        # set step values
        self.scheduler.set_timesteps(_A , device=audio.device )
        UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
            # 2. compute previous image: x_t -> t_t-1
            UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
        UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
        # Trim back to the originally requested length.
        UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=_A )
| 75 | 0 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
# Chunking constants for Natural Questions contexts (stride / max length / seed).
_lowerCAmelCase : List[str] = 20_48
_lowerCAmelCase : Optional[Any] = 40_96
_lowerCAmelCase : Optional[int] = 42
# Whether the training split should be processed (read from the environment).
_lowerCAmelCase : int = os.environ.pop("PROCESS_TRAIN", "false")
# Answer-category label -> integer id.
_lowerCAmelCase : Union[str, Any] = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]:
    """Select a single gold answer (yes/no, short, or long) from an NQ example.

    NOTE(review): machine-obfuscated code — values are assigned to a reused
    name while later lines read the upstream names (``answer``,
    ``annotation``, ``out`` ...); kept byte-identical, comments only.
    """
    def choose_first(SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int=False ):
        # Return the first candidate with a non-empty start_token list.
        assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
        if len(lowerCAmelCase__ ) == 1:
            _UpperCAmelCase : Optional[int] = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                _UpperCAmelCase : Tuple = {k: [a[k]] for k in a}
            if len(a["start_token"] ) > 0:
                break
        return a
    _UpperCAmelCase : List[Any] = {'''id''': example['''id''']}
    _UpperCAmelCase : int = example['''annotations''']
    _UpperCAmelCase : Any = annotation['''yes_no_answer''']
    if 0 in yes_no_answer or 1 in yes_no_answer:
        # Boolean answer: category "yes"/"no", no token span.
        _UpperCAmelCase : Union[str, Any] = ['''yes'''] if 1 in yes_no_answer else ['''no''']
        _UpperCAmelCase : int = []
        _UpperCAmelCase : Union[str, Any] = []
        _UpperCAmelCase : Dict = ['''<cls>''']
    else:
        _UpperCAmelCase : Tuple = ['''short''']
        _UpperCAmelCase : Optional[Any] = choose_first(annotation["short_answers"] )
        if len(out["start_token"] ) == 0:
            # answer will be long if short is not available
            _UpperCAmelCase : Dict = ['''long''']
            _UpperCAmelCase : List[str] = choose_first(annotation["long_answer"] , is_long_answer=lowerCAmelCase__ )
            _UpperCAmelCase : str = []
        answer.update(lowerCAmelCase__ )
    # disregard some samples
    if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
        _UpperCAmelCase : Optional[int] = True
    else:
        _UpperCAmelCase : Any = False
    _UpperCAmelCase : str = ['''start_token''', '''end_token''', '''start_byte''', '''end_byte''', '''text''']
    if not all(isinstance(answer[k] , lowerCAmelCase__ ) for k in cols ):
        raise ValueError("Issue in ID" , example["id"] )
    return answer
def __snake_case ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str=False ) -> Optional[Any]:
    """Build the plain-text context (HTML tokens stripped) and the re-indexed answer span.

    NOTE(review): machine-obfuscated code — assignments reuse one name while
    reads use the upstream names (``answer``, ``doc``, ``context`` ...);
    kept byte-identical, comments only.
    """
    _UpperCAmelCase : Union[str, Any] = _get_single_answer(lowerCAmelCase__ )
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]
    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        _UpperCAmelCase : int = example['''document''']['''tokens''']
        _UpperCAmelCase : str = []
        for i in range(len(doc["token"] ) ):
            if not doc["is_html"][i]:
                context.append(doc["token"][i] )
        return {
            "context": " ".join(lowerCAmelCase__ ),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }
    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }
    # handling normal samples
    _UpperCAmelCase : List[Any] = ['''start_token''', '''end_token''']
    answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} )  # e.g. [10] == 10
    _UpperCAmelCase : str = example['''document''']['''tokens''']
    _UpperCAmelCase : List[Any] = answer['''start_token''']
    _UpperCAmelCase : int = answer['''end_token''']
    _UpperCAmelCase : Union[str, Any] = []
    for i in range(len(doc["token"] ) ):
        if not doc["is_html"][i]:
            context.append(doc["token"][i] )
        else:
            # Shift the span left once for every dropped HTML token before it.
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    _UpperCAmelCase : Any = ''' '''.join(context[start_token:end_token] )
    # checking above code
    if assertion:
        # Debug print when the reconstructed span differs from the original tokens.
        _UpperCAmelCase : Any = doc['''is_html'''][answer['''start_token'''] : answer['''end_token''']]
        _UpperCAmelCase : Union[str, Any] = doc['''token'''][answer['''start_token'''] : answer['''end_token''']]
        _UpperCAmelCase : str = ''' '''.join([old[i] for i in range(len(lowerCAmelCase__ ) ) if not is_html[i]] )
        if new != old:
            print("ID:" , example["id"] )
            print("New:" , lowerCAmelCase__ , end="\n" )
            print("Old:" , lowerCAmelCase__ , end="\n\n" )
    return {
        "context": " ".join(lowerCAmelCase__ ),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def __snake_case ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=2_048 , SCREAMING_SNAKE_CASE__ : Any=4_096 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ) -> Dict:
    """Turn one Natural-Questions example into strided tokenized windows plus labels.

    Intended signature (per the call sites in this file):
    (example, tokenizer, doc_stride=2048, max_length=4096, assertion=True)
    -> dict with keys "example_id", "input_ids" (list of token windows) and
    "labels" (per-window start_token / end_token / category).

    NOTE(review): this block is machine-mangled and cannot run as written —
    the signature repeats the same parameter name five times (a SyntaxError)
    and every assignment binds the throwaway name ``_UpperCAmelCase`` while
    later lines read names (``out``, ``answer``, ``input_ids``, ``q_len``,
    ...) that are never assigned.  The comments below record the evidently
    intended binding of each assignment; confirm against the original script
    before repairing.
    """
    # out = get_context_and_ans(example, assertion=assertion)
    _UpperCAmelCase : Any = get_context_and_ans(lowerCAmelCase__ , assertion=lowerCAmelCase__ )
    # answer = out["answer"]
    _UpperCAmelCase : Union[str, Any] = out['''answer''']
    # later, removing these samples
    if answer["start_token"] == -1:
        # sentinel sample: this example carries no answer at all
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }
    # input_ids = tokenizer(question, context).input_ids
    _UpperCAmelCase : str = tokenizer(example["question"]["text"] , out["context"] ).input_ids
    # q_len = index just past the first [SEP] (question prefix length)
    _UpperCAmelCase : Optional[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        # inputs = [] ; category = []
        _UpperCAmelCase : List[Any] = []
        _UpperCAmelCase : Optional[Any] = []
        # q_indices = question prefix shared by every window
        _UpperCAmelCase : int = input_ids[:q_len]
        # doc_start_indices = window starts, stepping by max_length - doc_stride
        _UpperCAmelCase : Any = range(lowerCAmelCase__ , len(lowerCAmelCase__ ) , max_length - doc_stride )
        for i in doc_start_indices:
            # end_index = i + max_length - q_len
            _UpperCAmelCase : Tuple = i + max_length - q_len
            # slice = document tokens for this window
            _UpperCAmelCase : Optional[Any] = input_ids[i:end_index]
            inputs.append(q_indices + slice )
            category.append(answer["category"][0] )
            if slice[-1] == tokenizer.sep_token_id:
                # reached the end of the document
                break
        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(lowerCAmelCase__ ),
                "end_token": [-100] * len(lowerCAmelCase__ ),
                "category": category,
            },
        }
    # splitted_context = whitespace words of the context
    _UpperCAmelCase : Any = out['''context'''].split()
    # complete_end_token = the word the answer ends on
    _UpperCAmelCase : Any = splitted_context[answer['''end_token''']]
    # re-measure start/end in sub-token units by tokenizing the word prefixes
    _UpperCAmelCase : Dict = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=lowerCAmelCase__ , ).input_ids )
    _UpperCAmelCase : Optional[Any] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=lowerCAmelCase__ ).input_ids )
    answer["start_token"] += q_len
    answer["end_token"] += q_len
    # fixing end token
    _UpperCAmelCase : Union[str, Any] = len(tokenizer(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ).input_ids )
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1
    # old = the answer span in sub-tokens
    _UpperCAmelCase : Dict = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1]  # right & left are inclusive
    _UpperCAmelCase : List[Any] = answer['''start_token''']
    _UpperCAmelCase : Any = answer['''end_token''']
    if assertion:
        # sanity check: decoded span should match the recorded answer text
        _UpperCAmelCase : List[Any] = tokenizer.decode(lowerCAmelCase__ )
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION" )
            print("OLD:" , answer["span"] )
            print("NEW:" , lowerCAmelCase__ , end="\n\n" )
    if len(lowerCAmelCase__ ) <= max_length:
        # short document: a single window suffices
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }
    _UpperCAmelCase : Optional[Any] = input_ids[:q_len]
    _UpperCAmelCase : Any = range(lowerCAmelCase__ , len(lowerCAmelCase__ ) , max_length - doc_stride )
    _UpperCAmelCase : Any = []
    _UpperCAmelCase : Union[str, Any] = []
    _UpperCAmelCase : List[str] = []
    _UpperCAmelCase : Optional[int] = []  # null, yes, no, long, short
    for i in doc_start_indices:
        _UpperCAmelCase : List[str] = i + max_length - q_len
        _UpperCAmelCase : Optional[int] = input_ids[i:end_index]
        inputs.append(q_indices + slice )
        assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
        if start_token >= i and end_token <= end_index - 1:
            # answer lies fully inside this window: shift labels to window coords
            _UpperCAmelCase : List[Any] = start_token - i + q_len
            _UpperCAmelCase : List[str] = end_token - i + q_len
            answers_category.append(answer["category"][0] )  # ["short"] -> "short"
        else:
            # answer not in this window: use ignore-index labels
            _UpperCAmelCase : Union[str, Any] = -100
            _UpperCAmelCase : Tuple = -100
            answers_category.append("null" )
        _UpperCAmelCase : List[Any] = inputs[-1][start_token : end_token + 1]
        answers_start_token.append(lowerCAmelCase__ )
        answers_end_token.append(lowerCAmelCase__ )
        if assertion:
            # sanity check that the window-local span still decodes to the answer
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:" , example["id"] )
                print("New:" , tokenizer.decode(lowerCAmelCase__ ) )
                print("Old:" , tokenizer.decode(lowerCAmelCase__ ) , end="\n\n" )
        if slice[-1] == tokenizer.sep_token_id:
            break
    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def __snake_case(example, tokenizer, doc_stride=2_048, max_length=4_096, assertion=False):
    """`Dataset.map`-style wrapper: build strided training windows for one example.

    Fixes the mangled original, whose signature repeated the same parameter
    name five times (a SyntaxError) and whose body read never-bound names.

    Args:
        example: one Natural-Questions example dict.
        tokenizer: tokenizer used to build input ids.
        doc_stride: overlap stride between consecutive windows.
        max_length: maximum window length in tokens.
        assertion: when True, run extra consistency checks while building.

    Returns:
        The example dict with "input_ids" / "labels" fields added.

    NOTE(review): `get_strided_contexts_and_ans` is read from module scope; in
    this chunk the worker function above is mangled to `__snake_case`, so the
    name must exist in the real module for this to run.
    """
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example
def __snake_case(hf_data, file_name):
    """Append prepared samples to a JSON-Lines file, filtering waste samples.

    Fixes the mangled original (duplicate parameter names — a SyntaxError —
    and reads of never-bound locals).

    Args:
        hf_data: iterable of prepared examples with "input_ids" and "labels".
        file_name: path of the .jsonl file to append to.

    Side effects:
        Writes one record per kept window; skips no-answer windows and,
        randomly, ~60% of "null"-category windows (np.random.rand() < 0.6 —
        the original comment said 50%, the threshold says 60%).

    NOTE(review): `jsonlines`, `tqdm`, `np` and `CATEGORY_MAPPING` are read
    from module scope.
    """
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"], labels["start_token"], labels["end_token"], labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop ~60 % of null samples to rebalance
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import BigBirdTokenizer

    # NOTE(review): machine-mangled — every assignment below clobbers the same
    # name `_lowerCAmelCase` while later lines read `data`, `tokenizer`,
    # `fn_kwargs` and `cache_file_name`, which are never bound; `PROCESS_TRAIN`,
    # `DOC_STRIDE`, `MAX_LENGTH`, `SEED`, `prepare_inputs` and `save_to_disk`
    # are also undefined in this chunk.  The script cannot run as written.
    _lowerCAmelCase : Optional[int] = load_dataset("natural_questions")
    _lowerCAmelCase : Optional[int] = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
    # pick the split to process
    _lowerCAmelCase : Dict = data["train" if PROCESS_TRAIN == "true" else "validation"]
    # keyword arguments forwarded to the map() worker
    _lowerCAmelCase : List[str] = {
        "tokenizer": tokenizer,
        "doc_stride": DOC_STRIDE,
        "max_length": MAX_LENGTH,
        "assertion": False,
    }
    _lowerCAmelCase : List[Any] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
    _lowerCAmelCase : int = data.remove_columns(["annotations", "document", "id", "question"])
    print(data)
    np.random.seed(SEED)
    _lowerCAmelCase : Any = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
    save_to_disk(data, file_name=cache_file_name)
| 289 |
'''simple docstring'''
from math import factorial
def a__(successes: int, trials: int, prob: float) -> float:
    """Return the binomial-distribution probability of `successes` hits in `trials`.

    Computes C(trials, successes) * prob**successes * (1-prob)**(trials-successes).
    Fixes the mangled original, whose signature declared the same parameter
    name three times (a SyntaxError) while the body read `successes`,
    `trials` and `prob`.

    Args:
        successes: number of successful outcomes (non-negative int).
        trials: number of independent trials (non-negative int).
        prob: per-trial success probability, strictly between 0 and 1.

    Raises:
        ValueError: if successes > trials, either count is negative or not an
            int, or prob is outside (0, 1).
    """
    if successes > trials:
        raise ValueError('''successes must be lower or equal to trials''')
    if trials < 0 or successes < 0:
        raise ValueError('''the function is defined for non-negative integers''')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('''the function is defined for non-negative integers''')
    if not 0 < prob < 1:
        raise ValueError('''prob has to be in range of 1 - 0''')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # Fix: the original called undefined `binomial_distribution`; the
    # function in this chunk is named `a__`.
    print(a__(2, 4, 0.75))
| 75 | 0 |
"""simple docstring"""
def A(__snake_case: int = 1_0_0_0_0_0_0) -> int:
    """Return the start number below `__snake_case` with the longest Collatz chain.

    Project-Euler-14 style search with memoisation: `counters[n]` caches the
    chain length of `n` (counting both endpoints, so counters[1] == 1).
    Fixes the mangled original, whose assignments all clobbered one dummy
    name while the loop read `counters`, `largest_number`, `pre_counter` and
    the (unbound) upper limit.

    Args:
        __snake_case: exclusive upper bound for starting numbers.

    Returns:
        The starting number in [1, __snake_case) with the longest chain.
    """
    largest_number = 1
    pre_counter = 1          # chain length of the current best start
    counters = {1: 1}        # memo: start -> chain length
    for inputa in range(2, __snake_case):
        counter = 0
        number = inputa
        while True:
            if number in counters:
                # rest of the chain is already known
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if inputa not in counters:
            counters[inputa] = counter
        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
    # Fix: the original called undefined `solution`; the function in this
    # chunk is named `A`.  (Also drops trailing non-Python residue.)
    print(A(int(input().strip())))
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
    """Image processor: optional resize, center-crop, rescale and normalize
    steps producing a `pixel_values` batch (LeViT-style defaults: 224-pixel
    shortest edge scaled by 256/224, 224x224 crop, ImageNet mean/std).

    NOTE(review): this block is machine-mangled — every method declares all of
    its parameters with the same name `_A` (a SyntaxError in real Python)
    while the bodies read descriptive names (`size`, `crop_size`, `do_resize`,
    `images`, ...) that are never bound, and each assignment targets the
    throwaway name `UpperCAmelCase__`.  The docstrings below describe the
    evident intent only; confirm against the original processor before fixing.
    """

    # model input name produced by preprocess()
    lowerCAmelCase__ = ['pixel_values']

    def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
        """Store the default preprocessing configuration.

        Intended parameters: do_resize, size, resample, do_center_crop,
        crop_size, do_rescale, rescale_factor, do_normalize, image_mean,
        image_std.
        """
        super().__init__(**_A )
        UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
        UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
        UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
        UpperCAmelCase__ : str = do_resize
        UpperCAmelCase__ : List[Any] = size
        UpperCAmelCase__ : int = resample
        UpperCAmelCase__ : int = do_center_crop
        UpperCAmelCase__ : List[str] = crop_size
        UpperCAmelCase__ : Union[str, Any] = do_rescale
        UpperCAmelCase__ : Optional[int] = rescale_factor
        UpperCAmelCase__ : List[Any] = do_normalize
        UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
        """Resize an image.

        With a "shortest_edge" size the target edge is scaled by 256/224
        before computing the output size; with explicit "height"/"width" the
        image is resized to exactly that size.
        """
        UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
            UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
            UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            _A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )

    def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
        """Center-crop an image to size["height"] x size["width"]."""
        UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )

    def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
        """Rescale pixel values by a scalar factor (typically 1/255)."""
        return rescale(_A , scale=_A , data_format=_A , **_A )

    def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
        """Normalize an image with the given per-channel mean and std."""
        return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )

    def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
        """Run the configured pipeline over one image or a batch.

        Per-call arguments override the instance defaults; returns a
        BatchFeature with "pixel_values".
        """
        UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
        UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
        UpperCAmelCase__ : Tuple = size if size is not None else self.size
        UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
        UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
        UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
        if not valid_images(_A ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
        if do_resize:
            UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
        if do_center_crop:
            UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
        if do_rescale:
            UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
        if do_normalize:
            UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
        UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
        UpperCAmelCase__ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=_A , tensor_type=_A )
| 75 | 0 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# NOTE(review): machine-mangled — every constant below binds the same name
# `UpperCamelCase`, so each assignment clobbers the previous one, and
# `PATH_TO_TRANSFORMERS` / `transformers` are read before being defined under
# those names.  The duplicate of this script later in the file shows the
# intended names: PATH_TO_TRANSFORMERS, transformers, CONFIG_MAPPING,
# _re_checkpoint and CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK.
UpperCamelCase = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase = direct_transformers_import(PATH_TO_TRANSFORMERS)

UpperCamelCase = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

# Config classes exempt from the "docstring must mention a checkpoint" check.
UpperCamelCase = {
    'DecisionTransformerConfig',
    'EncoderDecoderConfig',
    'MusicgenConfig',
    'RagConfig',
    'SpeechEncoderDecoderConfig',
    'TimmBackboneConfig',
    'VisionEncoderDecoderConfig',
    'VisionTextDualEncoderConfig',
    'LlamaConfig',
}
def _A(lowerCAmelCase_):
    """Return the checkpoint name advertised in a config class's docstring.

    Scans the class source for `[name](https://huggingface.co/name)` links and
    returns the first name whose link matches its own hub URL, else None.
    Fixes the mangled original, which assigned every intermediate to the same
    dummy local while reading never-bound names.

    Args:
        lowerCAmelCase_: the configuration class to inspect.

    NOTE(review): `_re_checkpoint` is read from module scope; in this chunk
    the module constants are mangled to `UpperCamelCase`.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(lowerCAmelCase_)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F'https://huggingface.co/{ckpt_name}'
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def _A():
    """Raise if any non-exempt config class lacks a valid docstring checkpoint.

    Fixes the mangled original, which assigned intermediates to one dummy
    local while reading never-bound names.

    Raises:
        ValueError: listing every offending configuration class name.

    NOTE(review): this chunk defines both helpers under the name `_A` (this
    def shadows the previous one), and `get_checkpoint_from_config_class`,
    `CONFIG_MAPPING` and the ignore set are not bound under those names here.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
    # NOTE(review): `check_config_docstrings_have_checkpoints` is not defined
    # in this chunk — both helpers above are mangled to `_A`.
    check_config_docstrings_have_checkpoints()
| 61 |
'''simple docstring'''
import math
def a__() -> None:
    """Interactive driver: prompt for message/key/mode, print the (de)ciphered text.

    Fixes the mangled original, whose assignments all bound a dummy local
    while later lines read `message`, `mode` and `text`.

    Side effects: reads three values from stdin and prints the result; a
    trailing '|' marks any spaces at the end of the output.

    NOTE(review): `encrypt_message` / `decrypt_message` are not defined under
    those names in this chunk — both cipher functions are mangled to `a__`.
    """
    message = input('''Enter message: ''')
    key = int(input(F"""Enter key [2-{len(message) - 1}]: """))
    mode = input('''Encryption/Decryption [e/d]: ''')
    if mode.lower().startswith('''e'''):
        text = encrypt_message(key, message)
    elif mode.lower().startswith('''d'''):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"""Output:\n{text + "|"}""")
def a__(key: int, message: str) -> str:
    """Encrypt `message` with a columnar transposition cipher of width `key`.

    Column `c` collects message characters c, c+key, c+2*key, ...; the
    ciphertext is the concatenation of the columns.  Fixes the mangled
    original, whose signature declared the same parameter name twice (a
    SyntaxError) while the body read `key`, `message`, `pointer`.
    """
    cipher_text = [''''''] * key
    for col in range(key):
        pointer = col
        # walk this column: every key-th character starting at `col`
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def a__(key: int, message: str) -> str:
    """Decrypt a columnar-transposition ciphertext produced with width `key`.

    Rebuilds the encryption grid: num_cols = ceil(len/key) columns, `key`
    rows, with `num_shaded_boxes` unused cells at the bottom-right.  Fixes
    the mangled original (duplicate parameter names — a SyntaxError — and
    reads of never-bound locals).
    """
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [''''''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # wrap to the next row at the last column, or one earlier once we
        # reach the rows that contain a shaded (unused) final cell
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): `main` is not defined in this chunk — all three cipher
    # functions above are mangled to `a__`, so this call raises NameError.
    main()
| 75 | 0 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __lowerCamelCase(config_path, display=False):
    """Load an OmegaConf YAML config file; optionally pretty-print it.

    Fixes the mangled original (intermediates bound to a dummy local while
    the body read `config`).

    Args:
        config_path: path to the YAML configuration file.
        display: when True, dump the resolved config to stdout.

    Returns:
        The loaded OmegaConf configuration object.
    """
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def __lowerCamelCase(device, conf_path=None, ckpt_path=None):
    """Instantiate a taming-transformers VQModel and load its checkpoint.

    Fixes the mangled original (duplicate parameter names — a SyntaxError —
    and reads of never-bound locals).

    Args:
        device: torch device passed to `torch.load(map_location=...)` / `.to()`.
        conf_path: YAML config path (defaults to the vqgan_only config).
        ckpt_path: checkpoint path (defaults to the vqgan_only weights).

    NOTE(review): `load_config` is read from module scope (the helper above is
    mangled to `__lowerCamelCase`); the original passed unknown values for
    `display=` and `strict=` — False/True below are conventional, TODO confirm.
    """
    if conf_path is None:
        conf_path = '''./model_checkpoints/vqgan_only.yaml'''
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = '''./model_checkpoints/vqgan_only.pt'''
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd['''state_dict''']
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd  # free the (potentially large) state dict
    return model
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Dict ) -> int:
    """Round-trip a batch through the VQGAN encoder/decoder and return the
    reconstruction.

    NOTE(review): machine-mangled — the signature repeats one parameter name
    (a SyntaxError) and the body reads `model`, `z` and returns `xrec`, none
    of which are ever bound.  `model.encode(...)` presumably yields the
    latent used as `z` — TODO confirm the encode() return shape against the
    original taming-transformers helper before repairing.
    """
    SCREAMING_SNAKE_CASE_ : Dict = model.encode(lowerCAmelCase__ )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    SCREAMING_SNAKE_CASE_ : str = model.decode(lowerCAmelCase__ )
    return xrec
def __lowerCamelCase(string, reload=False):
    """Resolve a dotted path like "pkg.mod.Class" to the object it names.

    Fixes the mangled original (duplicate parameter names — a SyntaxError —
    and reads of never-bound locals).

    Args:
        string: fully-qualified dotted name; everything before the last dot
            is the module, the rest is the attribute.
        reload: when True, re-import the module before resolving.

    Returns:
        The attribute looked up on the imported module.
    """
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def __lowerCamelCase(config):
    """Instantiate the object described by a config with a "target" dotted path.

    Args:
        config: mapping with key "target" (dotted class path) and optional
            "params" (constructor kwargs).

    Raises:
        KeyError: if "target" is missing.

    NOTE(review): `get_obj_from_str` is read from module scope — in this
    chunk the resolver above is mangled to `__lowerCamelCase`.
    """
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def __lowerCamelCase(config, sd, gpu=True, eval_mode=True):
    """Build a model from `config`, optionally load weights and move to GPU.

    Fixes the mangled original (duplicate parameter names — a SyntaxError —
    and reads of never-bound locals).

    Args:
        config: instantiation config (see instantiate_from_config).
        sd: state dict to load, or None to keep fresh weights.
        gpu: when True, move the model to CUDA.
        eval_mode: when True, switch the model to eval().

    Returns:
        {"model": model}

    NOTE(review): `instantiate_from_config` is read from module scope — all
    helpers in this chunk are mangled to `__lowerCamelCase`.
    """
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def __lowerCamelCase(config, ckpt, gpu, eval_mode):
    """Load a Lightning-style checkpoint and build its model.

    Fixes the mangled original (duplicate parameter names — a SyntaxError —
    and reads of never-bound locals).

    Args:
        config: config object whose `.model` sub-config describes the model.
        ckpt: checkpoint path, or a falsy value for fresh weights.
        gpu / eval_mode: forwarded to load_model_from_config.

    Returns:
        (model, global_step) — global_step is None when no checkpoint given.

    NOTE(review): `load_model_from_config` is read from module scope — the
    helper above is mangled to `__lowerCamelCase`.
    """
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd['''global_step''']
        print(F"loaded model from global step {global_step}.")
    else:
        pl_sd = {'''state_dict''': None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)['''model''']
    return model, global_step
| 421 |
'''simple docstring'''
class lowerCamelCase_ :
    """A named value used as a min-heap element; ordered by `val`.

    Fixes the mangled original, whose __init__ declared both parameters with
    the same name (a SyntaxError) and assigned them to a throwaway local —
    every other block in this chunk reads `.name` / `.val` on these objects.
    """

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__(self, other):
        # heap ordering compares values only
        return self.val < other.val
class lowerCamelCase_ :
    """Array-backed min-heap over Node-like objects (need `.name`, `.val`, `__lt__`).

    Tracks each element's position in `idx_of_element` so `decrease_key` can
    locate a node in O(1), and mirrors name -> value in `heap_dict` for
    `heap["name"]` lookups.

    NOTE(review): in the mangled original every method after `__getitem__`
    was renamed to `lowercase_` (each def shadowing the previous one) and all
    assignments targeted a dummy local.  The method names restored below are
    exactly the names the surviving code itself calls (`self.build_heap`,
    `self.sift_down`, `self.sift_up`, `self.get_value`, ...).
    """

    def __init__(self, array):
        self.idx_of_element = {}   # node -> current index in self.heap
        self.heap_dict = {}        # node.name -> node.val
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify `array` in place and register index/value bookkeeping."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        # sift down every internal node, from the last parent up to the root
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds below it."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                # keep the position index in sync with the swap
                self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Move self.heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum element."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower `node`'s value to `new_value` and restore the heap property."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# NOTE(review): machine-mangled demo — `Node` and `MinHeap` are not defined
# under those names in this chunk (both classes are mangled to
# `lowerCamelCase_`), and every assignment below clobbers the same name
# `UpperCamelCase__` while later lines read `r`, `b`, `a`, `x`, `e` and
# `my_min_heap`, which are never bound.  The demo cannot run as written.
UpperCamelCase__ = Node('''R''', -1)
UpperCamelCase__ = Node('''B''', 6)
UpperCamelCase__ = Node('''A''', 3)
UpperCamelCase__ = Node('''X''', 1)
UpperCamelCase__ = Node('''E''', 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
UpperCamelCase__ = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
    print(i)

print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -1_7)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 | 0 |
'''simple docstring'''
def lowerCamelCase(lowerCamelCase: dict) -> bool:
    """Return True if the graph (adjacency lists keyed 0..n-1) is bipartite.

    2-colors each connected component via DFS, then checks that no edge joins
    two vertices of the same color.  Fixes the mangled original, whose
    assignments bound a dummy local while the body read `visited` and `color`.

    Args:
        lowerCamelCase: adjacency mapping/list; vertex i's neighbors are
            lowerCamelCase[i] for i in range(len(lowerCamelCase)).

    Returns:
        True if a proper 2-coloring exists, False otherwise.
    """
    visited = [False] * len(lowerCamelCase)
    color = [-1] * len(lowerCamelCase)

    def dfs(v, c):
        # color v with c, then alternate colors down the DFS tree
        visited[v] = True
        color[v] = c
        for u in lowerCamelCase[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(lowerCamelCase)):
        if not visited[i]:
            dfs(i, 0)

    # a same-colored edge means an odd cycle -> not bipartite
    for i in range(len(lowerCamelCase)):
        for j in lowerCamelCase[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
# NOTE(review): machine-mangled — the dict is bound to `__magic_name__`, but
# the next line reads `graph` and calls `check_bipartite_dfs`, neither of
# which is defined in this chunk (the checker above is mangled to
# `lowerCamelCase`).  The demo cannot run as written.
__magic_name__ = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
| 665 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# NOTE(review): machine-mangled — every constant below binds the same name
# `UpperCamelCase__`, so each assignment clobbers the previous one, and
# `PATH_TO_TRANSFORMERS` / `transformers` are read before being defined under
# those names.  Intended names: PATH_TO_TRANSFORMERS, transformers,
# CONFIG_MAPPING, _re_checkpoint and
# CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK.
UpperCamelCase__ = '''src/transformers'''

# This is to make sure the transformers module imported is the one in the repo.
UpperCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)

UpperCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCamelCase__ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')

# Config classes exempt from the "docstring must mention a checkpoint" check.
UpperCamelCase__ = {
    '''DecisionTransformerConfig''',
    '''EncoderDecoderConfig''',
    '''MusicgenConfig''',
    '''RagConfig''',
    '''SpeechEncoderDecoderConfig''',
    '''TimmBackboneConfig''',
    '''VisionEncoderDecoderConfig''',
    '''VisionTextDualEncoderConfig''',
    '''LlamaConfig''',
}
def a__(lowerCAmelCase__):
    """Return the checkpoint name advertised in a config class's docstring.

    Fixes the mangled original: intermediates were bound to one dummy local,
    so `_re_checkpoint.findall` was applied to the class object instead of
    its source text, and the loop read never-bound names.

    Args:
        lowerCAmelCase__: the configuration class to inspect.

    Returns:
        The matching checkpoint name, or None if no docstring link's name
        matches its own https://huggingface.co/<name> URL.

    NOTE(review): `_re_checkpoint` is read from module scope; in this chunk
    the module constants are mangled to `UpperCamelCase__`.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(lowerCAmelCase__)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/'''):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = F"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def a__():
    """Raise if any non-exempt config class lacks a valid docstring checkpoint.

    Fixes the mangled original (intermediates bound to one dummy local while
    the body read never-bound names).

    Raises:
        ValueError: listing every offending configuration class name.

    NOTE(review): both helpers in this chunk are mangled to `a__` (this def
    shadows the previous one), and `get_checkpoint_from_config_class`,
    `CONFIG_MAPPING` and the ignore set are not bound under those names here.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""")
if __name__ == "__main__":
    # NOTE(review): `check_config_docstrings_have_checkpoints` is not defined
    # in this chunk — both helpers above are mangled to `a__`.
    check_config_docstrings_have_checkpoints()
| 75 | 0 |
import math
def SCREAMING_SNAKE_CASE_(UpperCAmelCase_: int) -> list[int]:
    """Return all primes <= `UpperCAmelCase_` using a segmented sieve.

    First sieves the base range [2, sqrt(n)], then sieves successive segments
    of width sqrt(n) using those base primes, so peak memory stays O(sqrt(n)).
    Fixes the mangled original, whose assignments all bound a dummy local
    while the body read `prime`, `start`, `end`, `temp`, `in_prime`, `low`,
    `high` and `t`.

    Args:
        UpperCAmelCase_: inclusive upper bound of the search.

    Returns:
        The primes up to the bound, in increasing order.
    """
    prime = []
    start = 2
    end = int(math.sqrt(UpperCAmelCase_))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # classic sieve over the base range [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, UpperCAmelCase_)
    while low <= UpperCAmelCase_:
        # sieve the segment [low, high] with the base primes
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # first multiple of `each` inside the segment
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, UpperCAmelCase_)
    return prime
print(sieve(10**6))
| 443 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_(metaclass=__a):
    """Placeholder object that raises via `requires_backends` because the
    `torch` / `torchsde` backends are unavailable.

    Fixes the mangled original, whose methods declared `*_A, **_A` (the same
    name for varargs and kwargs — a SyntaxError).

    NOTE(review): both classmethods are mangled to the same name `lowercase_`
    (the second shadows the first); in the real dummy-object module they are
    `from_config` and `from_pretrained`.  Names kept to preserve the visible
    interface.
    """

    lowerCAmelCase__ = ['torch', 'torchsde']  # backends this placeholder stands in for

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''torch''', '''torchsde'''])

    @classmethod
    def lowercase_(cls, *args, **kwargs):
        requires_backends(cls, ['''torch''', '''torchsde'''])

    @classmethod
    def lowercase_(cls, *args, **kwargs):  # noqa: F811 — shadows the previous def, as in the original
        requires_backends(cls, ['''torch''', '''torchsde'''])
| 75 | 0 |
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
# Module-level logging state; `_lock` guards lazy handler configuration.
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None  # created lazily by _configure_library_root_logger

# Mapping of verbosity names (e.g. TRANSFORMERS_VERBOSITY values) to logging levels.
log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True  # whether tqdm progress bars are enabled
def _get_default_logging_level():
    """Return the default logging level, honoring the TRANSFORMERS_VERBOSITY env var.

    Falls back to ``_default_log_level`` when the variable is unset or invalid.
    """
    env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
                f'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
    return _default_log_level
def _get_library_name() -> str:
    """Return the top-level package name this module belongs to."""
    return __name__.split(""".""")[0]
def _get_library_root_logger() -> logging.Logger:
    """Return the root logger for this library's package."""
    return logging.getLogger(_get_library_name())
def _configure_library_root_logger() -> None:
    """Lazily attach the default stderr handler to the library root logger.

    Idempotent and thread-safe: does nothing once ``_default_handler`` exists.
    """
    global _default_handler
    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush
        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler)
        library_root_logger.setLevel(_get_default_logging_level())
        library_root_logger.propagate = False
def _reset_library_root_logger() -> None:
    """Detach the default handler and reset the library root logger level."""
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict():
    """Return the mapping of verbosity names to ``logging`` levels."""
    return log_levels
def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the given ``name`` (library root logger by default).

    Ensures the library root logger is configured first.
    """
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity() -> int:
    """Return the current effective verbosity level of the library root logger."""
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int) -> None:
    """Set the verbosity level of the library root logger.

    :param verbosity: a ``logging`` level, e.g. ``logging.INFO``.
    """
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info():
    """Set library verbosity to INFO."""
    return set_verbosity(INFO)
def set_verbosity_warning():
    """Set library verbosity to WARNING."""
    return set_verbosity(WARNING)
def set_verbosity_debug():
    """Set library verbosity to DEBUG."""
    return set_verbosity(DEBUG)
def set_verbosity_error():
    """Set library verbosity to ERROR."""
    return set_verbosity(ERROR)
def disable_default_handler() -> None:
    """Remove the library's default stderr handler from the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler() -> None:
    """Re-attach the library's default stderr handler to the root logger."""
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler(handler: logging.Handler) -> None:
    """Add ``handler`` to the library root logger."""
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler(handler: logging.Handler) -> None:
    """Remove ``handler`` (which must be attached) from the library root logger."""
    _configure_library_root_logger()
    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation() -> None:
    """Stop library log records from propagating to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    """Allow library log records to propagate to ancestor loggers."""
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format() -> None:
    """Apply an explicit '[LEVEL|file:line] time >> message' formatter to all handlers."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""")
        handler.setFormatter(formatter)
def reset_format() -> None:
    """Reset every root-logger handler back to the default (no) formatter."""
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice(self, *args, **kwargs):
    """Like ``Logger.warning``, but silenced when TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
    no_advisory_warnings = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""", False)
    if no_advisory_warnings:
        return
    self.warning(*args, **kwargs)
SCREAMING_SNAKE_CASE = warning_advice
@functools.lru_cache(None)
def warning_once(self, *args, **kwargs):
    """Like ``Logger.warning``, but each distinct (self, args) is emitted only once.

    The unbounded lru_cache keys on all arguments, so repeated identical
    warnings are swallowed after the first call.
    """
    self.warning(*args, **kwargs)
SCREAMING_SNAKE_CASE = warning_once
class EmptyTqdm:
    """Drop-in stand-in for ``tqdm`` that does nothing (used when bars are disabled)."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        # First positional argument, if any, is the iterable being wrapped.
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return a no-op function for any tqdm method (update, set_description, ...)."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return
class _tqdm_cls:
    """Factory that yields a real ``tqdm`` bar or an ``EmptyTqdm`` based on ``_tqdm_active``."""

    def __call__(self, *args, **kwargs):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
SCREAMING_SNAKE_CASE = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    """Return True when tqdm progress bars are globally enabled."""
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar():
    """Globally enable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar():
    """Globally disable tqdm progress bars (here and in huggingface_hub)."""
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
| 99 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''}
class lowerCamelCase_(PretrainedConfig):
    """Configuration for the CTRL model.

    Stores vocabulary size, embedding/FFN dimensions, depth, attention heads,
    dropout probabilities and initialization settings.
    """

    model_type = 'ctrl'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.0_2,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # inner feed-forward dimension
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
| 75 | 0 |
'''simple docstring'''
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# Help text used as the description for the `accelerate config` command parser below.
lowercase = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input():
    """Prompt for the compute environment and gather the matching config interactively."""
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Build the argument parser for `accelerate config`.

    :param subparsers: optional subparsers action to register under; when
        None a standalone ArgumentParser is created.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=lowercase)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=lowercase)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have "
            "such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed "
            "with \'huggingface\'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Collect the interactive config and save it to JSON or YAML.

    Saves to ``args.config_file`` when given, otherwise to the default YAML
    config file (creating the cache directory if needed).
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    # File extension decides the serialization format.
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f'accelerate configuration saved at {config_file}')
def main():
    """CLI entry point: parse arguments and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
    # Allow running this module directly as the `accelerate config` CLI.
    main()
| 211 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER', 'False')) is not True,
    reason='Skipping test because should only be run when releasing minor transformers version',
)
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'pytorch',
            'script': 'run_ddp.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf_dist.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
        },
    ] )
class lowerCamelCase_(unittest.TestCase):
    """Multi-node data-parallel SageMaker training smoke tests (one case per framework)."""

    def setUp(self):
        # Copy the training script into the test workspace (pytorch cases only).
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding='utf-8',
                check=True,
            )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for `instance_count` nodes (ddp or smdistributed)."""
        job_name = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version='py36',
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics to a CSV in the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        """Train on `instance_count` nodes and assert runtime/accuracy/loss KPIs."""
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999_999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""", 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
class _lowerCamelCase(PretrainedConfig):
    """Configuration for a timm-library backbone wrapped for use in transformers."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        # Default to the last feature map when no indices are given.
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 243 |
'''simple docstring'''
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
# BibTeX citation for the MATH dataset paper.
_CITATION = '''\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
'''

# Short human-readable description of the metric.
_DESCRIPTION = '''\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
'''

# Usage documentation appended to the metric docstring.
_KWARGS_DESCRIPTION = R'''
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {\'accuracy\': 1.0}
'''
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase_(datasets.Metric):
    """Accuracy metric for the MATH dataset with LaTeX canonicalization."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('string'),
                    'references': datasets.Value('string'),
                }
            ),
            homepage='https://github.com/hendrycks/math',
            codebase_urls=['https://github.com/hendrycks/math'],
        )

    def _compute(self, predictions, references):
        """Return {'accuracy': fraction of predictions equivalent to their reference}."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| 75 | 0 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True,
    reason="Skipping test because should only be run when releasing minor transformers version",
)
@pytest.mark.usefixtures("sm_env")
@parameterized_class(
    [
        {
            "framework": "pytorch",
            "script": "run_glue.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "pytorch",
            "script": "run_ddp.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6},
        },
        {
            "framework": "tensorflow",
            "script": "run_tf_dist.py",
            "model_name_or_path": "distilbert-base-cased",
            "instance_type": "ml.p3.16xlarge",
            "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7},
        },
    ] )
class _snake_case(unittest.TestCase):
    """Multi-node data-parallel SageMaker training smoke tests (one case per framework)."""

    def setUp(self):
        # Copy the training script into the test workspace (pytorch cases only).
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count):
        """Build a HuggingFace estimator for `instance_count` nodes (ddp or smdistributed)."""
        job_name = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=job_name,
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        """Export the job's CloudWatch metrics to a CSV in the test workspace."""
        TrainingJobAnalytics(job_name).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""")

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        """Train on `instance_count` nodes and assert runtime/accuracy/loss KPIs."""
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(F"""{estimator.latest_training_job.name}.json""", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
# Filenames the tokenizer expects inside a checkpoint directory.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
    },
    'merges_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
    },
}

# Maximum sequence lengths (position embeddings) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/bart-base': 1_0_2_4,
    'facebook/bart-large': 1_0_2_4,
    'facebook/bart-large-mnli': 1_0_2_4,
    'facebook/bart-large-cnn': 1_0_2_4,
    'facebook/bart-large-xsum': 1_0_2_4,
    'yjernite/bart_eli5': 1_0_2_4,
}
class lowerCamelCase_(PreTrainedTokenizerFast):
    """Fast BART tokenizer (byte-level BPE, backed by HuggingFace *tokenizers*)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        # Rebuild the pre-tokenizer if its add_prefix_space disagrees with ours.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'])
            if "cls" in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False
            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True
            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self):
        """The mask token as a string, or None (with an error log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # The mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.'
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                'to use it with pretokenized inputs.'
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the vocab/merges files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add BART special tokens: <s> A </s> (</s> B </s> for pairs)."""
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        """BART does not use token type ids; return a zero mask of matching length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 75 | 0 |
'''simple docstring'''
__a = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
def base64_encode(data: bytes) -> bytes:
    """Encode ``data`` to Base64 without using the stdlib ``base64`` module.

    :param data: bytes to encode.
    :raises TypeError: when ``data`` is not a bytes-like object.
    :return: the Base64 encoding, including '=' padding.
    """
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
        raise TypeError(msg)

    binary_stream = ''.join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b'=' * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b''

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data) -> bytes:
    """Decode Base64-encoded ``encoded_data`` (str or ASCII bytes) back to bytes.

    :raises TypeError: when the input is neither str nor bytes.
    :raises ValueError: when a bytes input is not ASCII-decodable.
    """
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            'argument should be a bytes-like object or ASCII string, '
            f"""not '{encoded_data.__class__.__name__}'"""
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = ''.join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_bytes = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_bytes)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by repeated random pair swaps and return it.

    Note: this variant swaps two random positions len(data) times rather than
    the classic descending-index Fisher-Yates walk.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    # Demo: shuffle a list of integers and a list of strings.
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ['python', 'says', 'hello', '!']
    print('Fisher-Yates Shuffle:')
    print('List', integers, strings)
    print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 75 | 0 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """``yaml.SafeLoader`` that raises on mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert them to tuples before counting.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f'Got duplicate yaml keys: {duplicate_keys}')

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its YAML front-matter block and the remaining body.

    :param readme_content: full README text.
    :return: (yaml_block or None, body) — yaml_block excludes the '---' fences.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = '\n'.join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class UpperCAmelCase_(dict):
    """Dataset card metadata, stored as a dict and (de)serialized as YAML front matter."""

    # class attributes
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path):
        """Load metadata from the YAML front matter of the README at ``path``."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        """Write this metadata as front matter into the README at ``path``.

        The existing README body (if any) is preserved below the YAML block.
        """
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        """Return the full README text with this metadata as its front matter."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str):
        """Parse a YAML string (rejecting duplicate keys) into metadata fields."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, mapping underscore field names back to dashed YAML keys."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Known task-category ids, each mapped to an (empty) list of task ids.
# NOTE(review): presumably the reference set of valid `task_categories`
# values for README validation — confirm against the code that consumes it.
_lowerCAmelCase : int = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
    # CLI entry point: validate the YAML metadata block of a README.md file.
    # Fixes: the original assigned the parser/args to one reused mangled name
    # while reading `ap`, `args`, `readme_filepath` and `dataset_metadata`,
    # all undefined.
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    # NOTE(review): `DatasetMetadata` is not defined under that name in this
    # file (the metadata class above is mangled) — confirm the intended class.
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 289 |
'''simple docstring'''
import math
def a__(lowerCAmelCase__) -> list[int]:
    """Return all primes <= `lowerCAmelCase__` using a segmented sieve.

    First sieves [2, sqrt(n)] normally, then marks composites segment by
    segment using those base primes, keeping memory at O(sqrt(n)).

    Fixes: the original assigned every local to one reused mangled name and
    then read `prime`, `start`, `end`, `temp`, `in_prime`, `low`, `high`,
    `t` and `n`, all undefined.
    """
    prime = []
    start = 2
    end = int(math.sqrt(lowerCAmelCase__))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    # Base sieve over [2, sqrt(n)].
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end, lowerCAmelCase__)
    # Sieve each subsequent segment [low, high] with the base primes.
    while low <= lowerCAmelCase__:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` inside the segment.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, lowerCAmelCase__)
    return prime
if __name__ == "__main__":
    # Fixes: `sieve` was undefined (the function above is `a__`); also guard
    # so importing this module does not trigger the expensive computation.
    print(a__(10**6))
| 75 | 0 |
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
snake_case : Optional[int] = """src/transformers"""
snake_case : Dict = """docs/source/en"""
snake_case : int = """."""
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return (text, start_index, end_index, lines) for the region of
    `filename` between `start_prompt` and `end_prompt` (prompt lines
    excluded, surrounding blank lines trimmed).

    Fixes: the original declared three parameters all named `__snake_case`
    (a SyntaxError) and read undefined names; renamed to match the keyword
    arguments used by the caller below, with `A` kept as an alias.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1
    # Find the end prompt.
    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1
    # Trim blank lines on both edges of the extracted region.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines


# Backward-compatible alias for the original (obfuscated) name.
A = _find_text_in_file
# Add here suffixes that are used to identify models, separated by |
MODEL_SUFFIXES = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
# Fixes: all four constants were assigned to the same mangled name while
# the functions below read `_re_tf_models`, `_re_flax_models`,
# `_re_pt_models` and `transformers_module`.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
def camel_case_split(identifier):
    """Split a CamelCase `identifier` into its component words.

    Fixes: body read the undefined mangled name for its argument; renamed
    to the name the table builder below calls, with `A` kept as an alias.
    """
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


# Backward-compatible alias for the original (obfuscated) name.
A = camel_case_split
def _center_text(text, width):
    """Center `text` within `width` characters (padding with spaces).

    The check/cross emoji render two columns wide, so they count as
    length 2. Fixes: original declared two parameters with the same name
    (a SyntaxError) and read undefined locals; `A` kept as an alias.
    """
    text_length = 2 if text == "✅" or text == "❌" else len(text)
    left_indent = (width - text_length) // 2
    right_indent = width - text_length - left_indent
    return " " * left_indent + text + " " * right_indent


# Backward-compatible alias for the original (obfuscated) name.
A = _center_text
def get_model_table_from_auto_modules():
    """Generate an up-to-date Markdown model table from the auto modules.

    Fixes: every local was assigned to one reused mangled name while the
    body read the real identifiers (`config_maping_names`, `lookup_dict`,
    `widths`, ...); `collections.defaultdict` was called with an undefined
    argument (must be `bool` so missing prefixes render as ❌); and the
    per-model/width assignments never landed in their containers.
    """
    # Dictionary of model names to configs.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_name_to_config = {
        name: config_maping_names[code]
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if code in config_maping_names
    }
    model_name_to_prefix = {name: config.replace("Config", "") for name, config in model_name_to_config.items()}

    # Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
    slow_tokenizers = collections.defaultdict(bool)
    fast_tokenizers = collections.defaultdict(bool)
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once).
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if attr_name.endswith("Tokenizer"):
            lookup_dict = slow_tokenizers
            attr_name = attr_name[:-9]
        elif attr_name.endswith("TokenizerFast"):
            lookup_dict = fast_tokenizers
            attr_name = attr_name[:-13]
        elif _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]
        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_name_to_prefix.values():
                    lookup_dict[attr_name] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    # Let's build that table!
    model_names = list(model_name_to_config.keys())
    model_names.sort(key=str.lower)
    columns = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
    # We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
    widths = [len(c) + 2 for c in columns]
    # The first column holds the model names, so size it to the longest one.
    widths[0] = max([len(name) for name in model_names]) + 2

    # Build the table per se
    table = "|" + "|".join([_center_text(c, w) for c, w in zip(columns, widths)]) + "|\n"
    # Use ":-----:" format to center-aligned table cell texts
    table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths]) + "|\n"

    check = {True: "✅", False: "❌"}
    for name in model_names:
        prefix = model_name_to_prefix[name]
        line = [
            name,
            check[slow_tokenizers[prefix]],
            check[fast_tokenizers[prefix]],
            check[pt_models[prefix]],
            check[tf_models[prefix]],
            check[flax_models[prefix]],
        ]
        table += "|" + "|".join([_center_text(l, w) for l, w in zip(line, widths)]) + "|\n"
    return table


# Backward-compatible alias for the original (obfuscated) name.
A = get_model_table_from_auto_modules
def check_model_table(overwrite=False):
    """Check the model table in `index.md` matches the library state.

    When `overwrite` is True the table is rewritten in place; otherwise a
    ValueError asks the user to run `make fix-copies`.

    Fixes: the 4-tuple returned by `_find_text_in_file` was collapsed into
    one mangled name while the body read `current_table`, `lines`,
    `start_index` and `end_index`, all undefined.
    """
    current_table, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_DOCS, "index.md"),
        start_prompt="<!--This table is updated automatically from the auto modules",
        end_prompt="<!-- End table-->",
    )
    new_table = get_model_table_from_auto_modules()

    if current_table != new_table:
        if overwrite:
            with open(os.path.join(PATH_TO_DOCS, "index.md"), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_table] + lines[end_index:])
        else:
            raise ValueError(
                "The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.")


# Backward-compatible alias for the original (obfuscated) name.
A = check_model_table
if __name__ == "__main__":
    # CLI entry point: verify (or fix) the model table in the docs index.
    # Fixes: the parser and parsed args were each assigned to one reused
    # mangled name while the body read `parser` and `args`, both undefined.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_model_table(args.fix_and_overwrite)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
    """Fast, tiny-model tests for ``StableDiffusionInpaintPipeline``.

    NOTE(review): many local names in the bodies below (`_A`, `unet`,
    `image`, `sd_pipe`, ...) are never bound in the visible code, the
    second method declares two parameters both named `_A` (a SyntaxError),
    and the class calls `self.get_dummy_components` / `self.get_dummy_inputs`
    though its methods are all named `lowercase_` — the file appears
    mechanically renamed. Confirm against the original diffusers test module.
    """

    # Pipeline under test plus the shared parameter/batch-parameter sets.
    lowerCAmelCase__ = StableDiffusionInpaintPipeline
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    lowerCAmelCase__ = frozenset(
        [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    lowerCAmelCase__ = frozenset([] )

    def lowercase_ ( self : Optional[int] ):
        '''Build tiny UNet/VAE/CLIP components so the pipeline runs on CPU.'''
        torch.manual_seed(0 )
        UpperCAmelCase__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
        UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
        torch.manual_seed(0 )
        UpperCAmelCase__ : str = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
        UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
        UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        UpperCAmelCase__ : str = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''safety_checker''': None,
            '''feature_extractor''': None,
        }
        return components

    def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
        '''Create deterministic image/mask/prompt inputs for one pipeline call.'''
        # NOTE(review): parameters were presumably (device, seed) originally.
        UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
        UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
        UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
        # MPS has no per-device Generator; fall back to the global seed there.
        if str(_A ).startswith('''mps''' ):
            UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
        else:
            UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
        UpperCAmelCase__ : Optional[int] = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': init_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 6.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def lowercase_ ( self : Union[str, Any] ):
        '''Smoke-test a 2-step CPU run against a pinned output slice.'''
        UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
        UpperCAmelCase__ : Tuple = self.get_dummy_components()
        UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
        UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
        sd_pipe.set_progress_bar_config(disable=_A )
        UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
        UpperCAmelCase__ : Any = sd_pipe(**_A ).images
        UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2

    def lowercase_ ( self : Tuple ):
        '''Batched single-image outputs must match within 3e-3.'''
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests against the SD2 inpainting checkpoint.

    NOTE(review): local names in the bodies (`pipe`, `output`, `image`,
    `expected_image`, `mem_bytes`, `_A`, ...) are never bound in the visible
    code — the file appears mechanically renamed. Confirm against the
    original diffusers test module before relying on the details below.
    """

    def lowercase_ ( self : List[Any] ):
        '''Release GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowercase_ ( self : Optional[int] ):
        '''fp32 end-to-end inpainting run compared to a reference image.'''
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : List[Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench.npy''' )
        UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : str = torch.manual_seed(0 )
        UpperCAmelCase__ : str = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : int = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 9e-3

    def lowercase_ ( self : Any ):
        '''fp16 end-to-end inpainting run (looser tolerance than fp32).'''
        UpperCAmelCase__ : List[str] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Any = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Union[str, Any] = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
            '''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
        UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
            _A , torch_dtype=torch.floataa , safety_checker=_A , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing()
        UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
        UpperCAmelCase__ : Optional[Any] = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
        UpperCAmelCase__ : Tuple = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 5e-1

    def lowercase_ ( self : Any ):
        '''Sequential CPU offload keeps peak GPU memory under ~2.65 GB.'''
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        UpperCAmelCase__ : Union[str, Any] = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/sd2-inpaint/init_image.png''' )
        UpperCAmelCase__ : Dict = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
        UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
        UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
        UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
        pipe.to(_A )
        pipe.set_progress_bar_config(disable=_A )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
        UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
        UpperCAmelCase__ : Any = pipe(
            prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
        UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.6_5 * 10**9
| 75 | 0 |
from __future__ import annotations
class __lowerCamelCase:
    """
    Matrix backed by a list of row lists of ints/floats.

    Supports determinant, minors/cofactors, adjugate, inverse, addition,
    subtraction, scalar and matrix multiplication, and integer powers.

    Fixes: every method was named `a` (each shadowing the previous), locals
    were assigned to one reused mangled name while bodies read the real
    identifiers, and internal calls (`Matrix(...)`, `self.get_minor`,
    `self.cofactors`, ...) never resolved. Method names are restored to the
    names the class itself calls; a module-level ``Matrix`` alias is added.
    """

    def __init__(self, rows: list[list[int]]) -> None:
        # Validate that `rows` is rectangular and numeric; an empty list of
        # rows produces the null matrix.
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        """Return the columns as a list of lists (i.e. the transpose's rows)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        """(rows, columns) pair."""
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        """Identity matrix of the same order."""
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        """Determinant via cofactor (Laplace) expansion; 0 for non-square."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        # Invertible iff the determinant is non-zero.
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        """Determinant of the submatrix with `row` and `column` removed."""
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        """Signed minor at (row, column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        """Matrix of cofactors (minors with the checkerboard sign applied)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        # Transpose of the cofactor matrix.
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        """Inverse via adjugate / determinant; TypeError when singular."""
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        """Append `row` (or insert at `position`), validating type and length."""
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        """Append `column` (or insert at `position`), validating type and length."""
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            # NOTE: scalar products are truncated to int, preserving the
            # original implementation's behavior.
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows])
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second")
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        """Dot product of two equal-length vectors."""
        return sum(row[i] * column[i] for i in range(len(row)))

    # Backward-compatible alias: the last surviving `def a` in the original
    # class bound `a` to the dot_product classmethod.
    a = dot_product


# Public alias: the class refers to itself as ``Matrix`` internally.
Matrix = __lowerCamelCase
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 61 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def a__(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
) -> dict:
    """Build the model-input dict for Blenderbot tests, deriving any masks
    not supplied from the pad token.

    Fixes: the original declared all eight parameters under the same name
    (a SyntaxError) and assigned every computed mask to one reused mangled
    name; the return annotation said ``Tuple`` although a dict is returned.
    Note: the returned ``decoder_attention_mask`` intentionally reuses the
    encoder ``attention_mask``, as in the original return statement.
    """
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class lowerCamelCase_ :
    """Config/inputs factory and KV-cache equivalence checks for Flax Blenderbot.

    NOTE(review): the parameter lists below repeat the name `_A` (a
    SyntaxError) and the bodies read names (`parent`, `batch_size`, `model`,
    `outputs_cache`, ...) never bound in the visible code — the file appears
    mechanically renamed. Confirm against the original transformers test
    module before relying on the details below.
    """

    def __init__( self : Optional[Any] , _A : Optional[Any] , _A : str=13 , _A : int=7 , _A : Any=True , _A : List[Any]=False , _A : Optional[int]=99 , _A : Optional[int]=16 , _A : int=2 , _A : Optional[int]=4 , _A : Optional[int]=4 , _A : int="gelu" , _A : List[str]=0.1 , _A : str=0.1 , _A : int=32 , _A : Optional[int]=2 , _A : int=1 , _A : Dict=0 , _A : Dict=0.0_2 , ):
        '''Store the model/test hyperparameters on the tester instance.'''
        UpperCAmelCase__ : Union[str, Any] = parent
        UpperCAmelCase__ : str = batch_size
        UpperCAmelCase__ : Dict = seq_length
        UpperCAmelCase__ : str = is_training
        UpperCAmelCase__ : int = use_labels
        UpperCAmelCase__ : Union[str, Any] = vocab_size
        UpperCAmelCase__ : Union[str, Any] = hidden_size
        UpperCAmelCase__ : int = num_hidden_layers
        UpperCAmelCase__ : Any = num_attention_heads
        UpperCAmelCase__ : List[str] = intermediate_size
        UpperCAmelCase__ : str = hidden_act
        UpperCAmelCase__ : str = hidden_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
        UpperCAmelCase__ : Union[str, Any] = max_position_embeddings
        UpperCAmelCase__ : int = eos_token_id
        UpperCAmelCase__ : Optional[int] = pad_token_id
        UpperCAmelCase__ : List[str] = bos_token_id
        UpperCAmelCase__ : Union[str, Any] = initializer_range

    def lowercase_ ( self : Any ):
        '''Build a small BlenderbotConfig plus random input dict.'''
        UpperCAmelCase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
        # Append the EOS token (id 2) to every sequence.
        UpperCAmelCase__ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
        UpperCAmelCase__ : List[Any] = shift_tokens_right(_A , 1 , 2 )
        UpperCAmelCase__ : List[Any] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_A , )
        UpperCAmelCase__ : Tuple = prepare_blenderbot_inputs_dict(_A , _A , _A )
        return config, inputs_dict

    def lowercase_ ( self : Union[str, Any] ):
        '''Thin wrapper returning (config, inputs_dict) for common tests.'''
        UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
        return config, inputs_dict

    def lowercase_ ( self : int , _A : List[Any] , _A : Optional[Any] , _A : int ):
        '''Decoding with a KV cache must match uncached decoding (no attn mask).'''
        UpperCAmelCase__ : List[str] = 20
        UpperCAmelCase__ : int = model_class_name(_A )
        UpperCAmelCase__ : str = model.encode(inputs_dict['''input_ids'''] )
        UpperCAmelCase__ , UpperCAmelCase__ : Dict = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        UpperCAmelCase__ : Tuple = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        UpperCAmelCase__ : Optional[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
        UpperCAmelCase__ : str = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        # Decode all but the last token with the cache...
        UpperCAmelCase__ : str = model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        # ...then the final token from the populated cache.
        UpperCAmelCase__ : Tuple = model.decode(
            decoder_input_ids[:, -1:] , _A , decoder_attention_mask=_A , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_A , )
        UpperCAmelCase__ : int = model.decode(_A , _A )
        UpperCAmelCase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )

    def lowercase_ ( self : Tuple , _A : List[Any] , _A : Tuple , _A : Tuple ):
        '''Same cache-equivalence check, with an explicit decoder attention mask.'''
        UpperCAmelCase__ : Tuple = 20
        UpperCAmelCase__ : Optional[int] = model_class_name(_A )
        UpperCAmelCase__ : Optional[int] = model.encode(inputs_dict['''input_ids'''] )
        UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = (
            inputs_dict['''decoder_input_ids'''],
            inputs_dict['''decoder_attention_mask'''],
        )
        # Pad the decoder mask out to the maximum decode length.
        UpperCAmelCase__ : Any = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
            ] , axis=-1 , )
        UpperCAmelCase__ : List[Any] = model.init_cache(decoder_input_ids.shape[0] , _A , _A )
        UpperCAmelCase__ : Optional[Any] = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
        UpperCAmelCase__ : int = model.decode(
            decoder_input_ids[:, :-1] , _A , decoder_attention_mask=_A , past_key_values=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : List[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
        UpperCAmelCase__ : Any = model.decode(
            decoder_input_ids[:, -1:] , _A , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_A , decoder_position_ids=_A , )
        UpperCAmelCase__ : List[str] = model.decode(_A , _A , decoder_attention_mask=_A )
        UpperCAmelCase__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
        self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
    # Standalone head/utility tests for Flax Blenderbot.
    # NOTE(review): identifiers are machine-obfuscated — each `UpperCAmelCase__`
    # assignment target stands for the local name the body reads back later
    # (`input_ids`, `batch_size`, `config`, ...), and `_A` for the forwarded
    # argument. Verify against the un-obfuscated original.

    # vocabulary size shared by the small configs built below (99)
    lowerCAmelCase__ = 9_9

    def lowercase_ ( self : Optional[int] ):
        """Build a tiny BlenderbotConfig and a fixed (13, 7) batch of input ids.

        Returns (config, input_ids, batch_size). Each row ends with
        eos_token_id=2; row 5 ends in pad_token_id=1 on purpose.
        """
        UpperCAmelCase__ : List[str] = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ] , dtype=np.intaa , )
        UpperCAmelCase__ : int = input_ids.shape[0]
        UpperCAmelCase__ : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
        return config, input_ids, batch_size

    def lowercase_ ( self : List[Any] ):
        """LM head output must have shape (batch, seq_len, vocab_size)."""
        UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self._get_config_and_data()
        UpperCAmelCase__ : Any = FlaxBlenderbotForConditionalGeneration(_A )
        UpperCAmelCase__ : Optional[int] = lm_model(input_ids=_A )
        UpperCAmelCase__ : Dict = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , _A )

    def lowercase_ ( self : int ):
        """Encoder/decoder inputs of different lengths: logits follow the decoder length."""
        UpperCAmelCase__ : List[str] = BlenderbotConfig(
            vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
        UpperCAmelCase__ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_A )
        # encoder sees length-7 inputs, decoder sees length-5 targets
        UpperCAmelCase__ : str = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : Any = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
        UpperCAmelCase__ : Tuple = lm_model(input_ids=_A , decoder_input_ids=_A )
        UpperCAmelCase__ : int = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs['''logits'''].shape , _A )

    def lowercase_ ( self : Dict ):
        """shift_tokens_right keeps the shape, drops exactly one pad token (id 1),
        and writes the start token (id 2) into column 0 of every row."""
        UpperCAmelCase__ : Any = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
        UpperCAmelCase__ : Union[str, Any] = shift_tokens_right(_A , 1 , 2 )
        UpperCAmelCase__ : str = np.equal(_A , 1 ).astype(np.floataa ).sum()
        UpperCAmelCase__ : Dict = np.equal(_A , 1 ).astype(np.floataa ).sum()
        self.assertEqual(shifted.shape , input_ids.shape )
        self.assertEqual(_A , n_pad_before - 1 )
        self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class lowerCamelCase_ ( __a , unittest.TestCase , __a ):
    # Model + slow-integration tests for Flax Blenderbot.
    # NOTE(review): names are machine-obfuscated — the two `__a` bases are
    # presumably the shared model-tester and generation-tester mixins, and
    # `_A` / `UpperCAmelCase__` stand in for the real argument/local names.

    lowerCAmelCase__ = True
    # classes under test (empty when Flax is unavailable)
    lowerCAmelCase__ = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    lowerCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def lowercase_ ( self : Tuple ):
        """Instantiate the shared FlaxBlenderbotModelTester."""
        UpperCAmelCase__ : Dict = FlaxBlenderbotModelTester(self )

    def lowercase_ ( self : List[Any] ):
        """Exercise cached decoding for every model class."""
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(_A , _A , _A )

    def lowercase_ ( self : List[Any] ):
        """Exercise cached decoding with an explicit attention mask for every model class."""
        UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(_A , _A , _A )

    def lowercase_ ( self : Optional[int] ):
        """JIT-compiled vs eager `encode` must return tuples of identical shapes."""
        UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : Dict = self._prepare_for_class(_A , _A )
                UpperCAmelCase__ : str = model_class(_A )

                @jax.jit
                def encode_jitted(_A : Any , _A : Tuple=None , **_A : Optional[int] ):
                    return model.encode(input_ids=_A , attention_mask=_A )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Optional[Any] = encode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Tuple = encode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def lowercase_ ( self : Tuple ):
        """JIT-compiled vs eager `decode` must return tuples of identical shapes."""
        UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase__ : List[str] = model_class(_A )
                # encoder pass done once outside the jitted function
                UpperCAmelCase__ : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
                UpperCAmelCase__ : Tuple = {
                    '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
                    '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
                    '''encoder_outputs''': encoder_outputs,
                }

                @jax.jit
                def decode_jitted(_A : Optional[int] , _A : List[Any] , _A : int ):
                    return model.decode(
                        decoder_input_ids=_A , decoder_attention_mask=_A , encoder_outputs=_A , )

                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase__ : Any = decode_jitted(**_A ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase__ : Optional[int] = decode_jitted(**_A ).to_tuple()
                self.assertEqual(len(_A ) , len(_A ) )
                for jitted_output, output in zip(_A , _A ):
                    self.assertEqual(jitted_output.shape , output.shape )

    @slow
    def lowercase_ ( self : List[str] ):
        """Smoke-test loading the 400M distilled checkpoint for every class."""
        for model_class_name in self.all_model_classes:
            UpperCAmelCase__ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            UpperCAmelCase__ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
            UpperCAmelCase__ : Union[str, Any] = model(_A )
            self.assertIsNotNone(_A )

    @unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
    @slow
    def lowercase_ ( self : Dict ):
        """End-to-end greedy generation with the 3B checkpoint against a fixed
        target sentence (skipped on CPU for speed)."""
        UpperCAmelCase__ : Union[str, Any] = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
        UpperCAmelCase__ : int = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
        UpperCAmelCase__ : str = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=_A )
        UpperCAmelCase__ : Optional[Any] = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
        UpperCAmelCase__ : Optional[Any] = ['''Sam''']
        UpperCAmelCase__ : Dict = tokenizer(_A , return_tensors='''jax''' )
        UpperCAmelCase__ : List[str] = model.generate(**_A , **_A )
        UpperCAmelCase__ : Dict = '''Sam is a great name. It means "sun" in Gaelic.'''
        UpperCAmelCase__ : Any = tokenizer.batch_decode(_A , **_A )
        assert generated_txt[0].strip() == tgt_text
| 75 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( ProcessorMixin ):
    """Joint image+text processor forwarding to an `AutoImageProcessor` and an
    `AutoTokenizer`.

    NOTE(review): reconstructed from machine-obfuscated source — the original
    had three class attributes all named `_A`, duplicate `__init__`/`__call__`
    parameter names (a SyntaxError), a lost assignment target for the
    `pixel_values` merge, and three methods all named `__lowerCamelCase`.
    `ProcessorMixin` requires exactly the attribute names used below.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        # presumably the "current" processor defaults to the image processor —
        # TODO(review): confirm the lost assignment target against the original
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns the tokenizer encoding (with `pixel_values` merged in when
        images are also given), or a `BatchEncoding` of image features when
        only images are given.

        Raises:
            ValueError: if both `text` and `images` are None.
        """
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            # merge the vision features into the text encoding
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Names of the inputs `__call__` can produce."""
        return ["input_ids", "attention_mask", "pixel_values"]
| 421 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
    # Minimal beam-based builder with one flat string feature `content`.
    # NOTE(review): method names are obfuscated — presumably `_info`,
    # `_split_generators`, `_build_pcollection`; `_A` stands for the real
    # argument names (`dl_manager`, `pipeline`, `examples`, ...).

    def lowercase_ ( self : str ):
        """Declare the dataset's features (no supervised keys)."""
        return datasets.DatasetInfo(
            features=datasets.Features({'''content''': datasets.Value('''string''' )} ) , supervised_keys=_A , )

    def lowercase_ ( self : int , _A : Optional[int] , _A : Optional[Any] ):
        """Single TRAIN split fed from get_test_dummy_examples()."""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_dummy_examples()} )]

    def lowercase_ ( self : Union[str, Any] , _A : str , _A : Union[str, Any] ):
        """Build the beam PCollection from the in-memory example pairs."""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(_A )
class lowerCamelCase_ ( datasets.BeamBasedBuilder ):
    # Beam-based builder with a nested feature: {"a": Sequence({"b": string})}.
    # NOTE(review): method names are obfuscated — presumably `_info`,
    # `_split_generators`, `_build_pcollection`.

    def lowercase_ ( self : Any ):
        """Declare the nested dataset features (no supervised keys)."""
        return datasets.DatasetInfo(
            features=datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) , supervised_keys=_A , )

    def lowercase_ ( self : Any , _A : List[str] , _A : Any ):
        """Single TRAIN split fed from get_test_nested_examples()."""
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''examples''': get_test_nested_examples()} )
        ]

    def lowercase_ ( self : List[str] , _A : Optional[int] , _A : Tuple ):
        """Build the beam PCollection from the in-memory example pairs."""
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(_A )
def get_test_dummy_examples() -> list:
    """Return (key, example) pairs for the flat dummy beam dataset.

    Restored name: the builders and tests in this file call
    `get_test_dummy_examples`, but the obfuscated source defined it as `a__`.
    """
    return [(i, {"content": content}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
def get_test_nested_examples() -> list:
    """Return (key, example) pairs for the nested-feature beam dataset.

    Restored name: the builders and tests in this file call
    `get_test_nested_examples`, but the obfuscated source defined it as `a__`.
    """
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(['''foo''', '''bar''', '''foobar'''] )]
class lowerCamelCase_ ( __a ):
    # End-to-end tests for the beam-based builders above.
    # NOTE(review): obfuscated source — `__a` is presumably `unittest.TestCase`
    # (the bodies use self.assert*); `_A` / `UpperCAmelCase__` stand in for the
    # real argument/local names (tmp_cache_dir, expected_num_examples, ...).

    @require_beam
    def lowercase_ ( self : List[str] ):
        """download_and_prepare round-trip for the flat dummy dataset: arrow
        file on disk, features/row counts/contents match, info json written."""
        UpperCAmelCase__ : Dict = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase__ : Any = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            UpperCAmelCase__ : Union[str, Any] = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , _A )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
            self.assertDictEqual(dset['''train'''][0] , get_test_dummy_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def lowercase_ ( self : Any ):
        """Sharded write: patch WriteToParquet to force num_shards=2 and verify
        both shards exist and the (order-insensitive) contents are intact.

        NOTE(review): both existence assertions check shard 00000 — the second
        presumably should check 00001; verify against the original.
        """
        import apache_beam as beam

        UpperCAmelCase__ : List[str] = beam.io.parquetio.WriteToParquet
        UpperCAmelCase__ : int = len(get_test_dummy_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase__ : Optional[int] = DummyBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
            with patch('''apache_beam.io.parquetio.WriteToParquet''' ) as write_parquet_mock:
                UpperCAmelCase__ : Dict = partial(_A , num_shards=2 )
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        _A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train-00000-of-00002.arrow""" ) ) )
            self.assertDictEqual(builder.info.features , datasets.Features({'''content''': datasets.Value('''string''' )} ) )
            UpperCAmelCase__ : Tuple = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , _A )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset['''train''']['''content'''] ) , sorted(['''foo''', '''bar''', '''foobar'''] ) )
            self.assertTrue(
                os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset

    @require_beam
    def lowercase_ ( self : int ):
        """Without a beam_runner, download_and_prepare must raise MissingBeamOptions."""
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase__ : Tuple = DummyBeamDataset(cache_dir=_A )
            self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )

    @require_beam
    def lowercase_ ( self : Optional[int] ):
        """download_and_prepare round-trip for the nested-feature dataset."""
        UpperCAmelCase__ : Dict = len(get_test_nested_examples() )
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            UpperCAmelCase__ : int = NestedBeamDataset(cache_dir=_A , beam_runner='''DirectRunner''' )
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , f"""{builder.name}-train.arrow""" ) ) )
            self.assertDictEqual(
                builder.info.features , datasets.Features({'''a''': datasets.Sequence({'''b''': datasets.Value('''string''' )} )} ) )
            UpperCAmelCase__ : Optional[int] = builder.as_dataset()
            self.assertEqual(dset['''train'''].num_rows , _A )
            self.assertEqual(dset['''train'''].info.splits['''train'''].num_examples , _A )
            self.assertDictEqual(dset['''train'''][0] , get_test_nested_examples()[0][1] )
            self.assertDictEqual(
                dset['''train'''][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
            self.assertTrue(
                os.path.exists(os.path.join(_A , builder.name , '''default''' , '''0.0.0''' , '''dataset_info.json''' ) ) )
            del dset
| 75 | 0 |
'''simple docstring'''
from __future__ import annotations
from math import pi
def lowerCamelCase ( inductance: float , frequency: float , reactance: float ) -> dict[str, float]:
    """Solve the inductive-reactance relation X_L = 2*pi*f*L for the one
    argument supplied as 0.

    The obfuscated source declared all three parameters as `lowerCamelCase`
    (a duplicate-name SyntaxError) while the body read `inductance`,
    `frequency`, `reactance` — the parameter names are restored here.

    Args:
        inductance: inductance L in henries (pass 0 to solve for it).
        frequency: frequency f in hertz (pass 0 to solve for it).
        reactance: inductive reactance X_L in ohms (pass 0 to solve for it).

    Returns:
        A one-entry dict mapping the solved quantity's name to its value.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.

    >>> from math import isclose, pi
    >>> isclose(lowerCamelCase(0, 10_000, 50)["inductance"], 50 / (2 * pi * 10_000))
    True
    """
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError("""One and only one argument must be 0""" )
    if inductance < 0:
        raise ValueError("""Inductance cannot be negative""" )
    if frequency < 0:
        raise ValueError("""Frequency cannot be negative""" )
    if reactance < 0:
        raise ValueError("""Inductive reactance cannot be negative""" )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        # unreachable given the count(0) check above; kept for parity
        raise ValueError("""Exactly one argument must be 0""" )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
# CLI description for `accelerate config` (obfuscated source named this
# `UpperCamelCase__` while the parser code referenced it indirectly).
description = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''


def get_user_input():
    """Interactively build a config: ask for the compute environment, then
    delegate to the SageMaker or cluster questionnaire.

    Restored name: `config_command` below calls `get_user_input`, but the
    obfuscated source defined every function here as `a__`.
    """
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    """Build the `accelerate config` argument parser.

    When `subparsers` is given, registers a `config` subcommand on it and sets
    `func=config_command` as its default handler; otherwise builds a
    standalone parser. (The obfuscated source passed the wrong/undefined name
    for the description, the `--config_file` default, and the handler.)
    """
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=description )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=description )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=config_command )
    return parser


def config_command(args):
    """Run the questionnaire and save the resulting config as YAML or JSON.

    Falls back to the default YAML config file (creating the cache dir if
    needed) when `--config_file` was not given.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir ):
            os.makedirs(cache_dir )
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file )
    else:
        config.to_yaml_file(config_file )
    print(F"""accelerate configuration saved at {config_file}""" )


def main():
    """Entry point: parse CLI args and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args )


if __name__ == "__main__":
    main()
| 75 | 0 |
def encrypt(input_string: str, key: int) -> str:
    """Encrypt with the rail-fence (zigzag transposition) cipher.

    Characters are laid out on `key` rails in a zigzag and read off rail by
    rail. Restored names: the obfuscated source defined all three functions
    here as `SCREAMING_SNAKE_CASE_` with duplicate parameter names, while
    `bruteforce` calls `decrypt` by its real name.

    Args:
        input_string: text to encrypt.
        key: number of rails (grid height).

    Returns:
        The encrypted string; the input unchanged when key == 1 or the text
        is no longer than the key.

    Raises:
        ValueError: if key is 0 or negative.

    >>> encrypt("Hello, World!", 3)
    'Hoo!el,Wrdl l'
    """
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ['''''' .join(row) for row in temp_grid]
    output_string = ''''''.join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Decrypt a rail-fence ciphertext produced by `encrypt`.

    Rebuilds the zigzag template, fills the rails row by row from the
    ciphertext, then reads the characters back in zigzag order.

    Raises:
        ValueError: if key is 0 or negative.

    >>> decrypt("Hoo!el,Wrdl l", 3)
    'Hello, World!'
    """
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError('''Height of grid can\'t be 0 or negative''' )
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append('''*''' )

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ''''''  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Try every possible rail count and return {key_guess: decrypted_text}."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 443 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ):
    """Convert a TensorFlow GPT-2 checkpoint to PyTorch weights + config.

    Restored names: the obfuscated source defined this as `a__` with three
    parameters all called `lowerCAmelCase__` (a SyntaxError), while the main
    guard calls `convert_gpta_checkpoint_to_pytorch`; the undefined save-path
    argument to `torch.save` is also fixed.

    Args:
        gpta_checkpoint_path: path to the TF checkpoint.
        gpta_config_file: optional JSON config path ("" for default config).
        pytorch_dump_folder_path: output directory for the PyTorch files.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )

    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--gpt2_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    args = parser.parse_args()
    # attribute names follow the declared --gpt2_* options (the obfuscated
    # source read non-existent args.gpta_* attributes)
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 75 | 0 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
SCREAMING_SNAKE_CASE = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
SCREAMING_SNAKE_CASE = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
SCREAMING_SNAKE_CASE = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
"""simple docstring"""
def snake_case_ ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , )
def snake_case_ ( self , __A , __A ):
__a = 0.0
for i, j in zip(_A , _A ):
n_correct += 1.0 if math_equivalence.is_equiv(_A , _A ) else 0.0
__a = n_correct / len(_A )
return {
"accuracy": accuracy,
}
| 99 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
    """Shared tester that builds tiny LayoutLMv3 configs/inputs for the TF tests.

    NOTE(review): identifiers are machine-obfuscated — each `UpperCAmelCase__`
    assignment target stands for the attribute/local name read back later
    (`parent`, `batch_size`, `bbox`, ...), and `_A` for the forwarded
    argument. Verify against the un-obfuscated original.
    """

    def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
        """Store every hyperparameter and derive the combined sequence length."""
        UpperCAmelCase__ : int = parent
        UpperCAmelCase__ : Optional[int] = batch_size
        UpperCAmelCase__ : str = num_channels
        UpperCAmelCase__ : str = image_size
        UpperCAmelCase__ : List[str] = patch_size
        UpperCAmelCase__ : Any = is_training
        UpperCAmelCase__ : List[str] = use_input_mask
        UpperCAmelCase__ : Tuple = use_token_type_ids
        UpperCAmelCase__ : str = use_labels
        UpperCAmelCase__ : int = vocab_size
        UpperCAmelCase__ : List[Any] = hidden_size
        UpperCAmelCase__ : Optional[int] = num_hidden_layers
        UpperCAmelCase__ : List[str] = num_attention_heads
        UpperCAmelCase__ : Tuple = intermediate_size
        UpperCAmelCase__ : Dict = hidden_act
        UpperCAmelCase__ : int = hidden_dropout_prob
        UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
        UpperCAmelCase__ : List[str] = max_position_embeddings
        UpperCAmelCase__ : Tuple = type_vocab_size
        UpperCAmelCase__ : Any = type_sequence_label_size
        UpperCAmelCase__ : List[str] = initializer_range
        UpperCAmelCase__ : List[str] = coordinate_size
        UpperCAmelCase__ : Tuple = shape_size
        UpperCAmelCase__ : Optional[int] = num_labels
        UpperCAmelCase__ : Optional[Any] = num_choices
        UpperCAmelCase__ : Union[str, Any] = scope
        UpperCAmelCase__ : Optional[Any] = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        UpperCAmelCase__ : str = text_seq_length
        UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
        UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length

    def lowercase_ ( self : Union[str, Any] ):
        """Build a config plus random ids/bbox/pixel_values/masks/labels.

        Bounding boxes are sanitized so x0<=x1 and y0<=y1 for every token.
        """
        UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        UpperCAmelCase__ : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # swap y-coordinates so y0 <= y1
                    UpperCAmelCase__ : str = bbox[i, j, 3]
                    UpperCAmelCase__ : Dict = bbox[i, j, 1]
                    UpperCAmelCase__ : str = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # swap x-coordinates so x0 <= x1
                    UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
                    UpperCAmelCase__ : Any = bbox[i, j, 0]
                    UpperCAmelCase__ : List[Any] = tmp_coordinate
        UpperCAmelCase__ : str = tf.constant(_A )
        UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase__ : Any = None
        if self.use_input_mask:
            UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
        UpperCAmelCase__ : Any = None
        if self.use_token_type_ids:
            UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        UpperCAmelCase__ : Optional[int] = None
        UpperCAmelCase__ : List[str] = None
        if self.use_labels:
            UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
        """Check base-model output shapes for text+image, text-only, and image-only inputs."""
        UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
        # text + image
        UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
        UpperCAmelCase__ : Tuple = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
        UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        UpperCAmelCase__ : Any = model(_A , training=_A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
        """Sequence-classification head: logits must be (batch, num_labels)."""
        UpperCAmelCase__ : Optional[int] = self.num_labels
        UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
        UpperCAmelCase__ : Union[str, Any] = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
        """Token-classification head: logits must be (batch, text_seq_length, num_labels)."""
        UpperCAmelCase__ : List[Any] = self.num_labels
        UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
        UpperCAmelCase__ : Optional[int] = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
        """QA head: start/end logits must be (batch, seq_length) each."""
        UpperCAmelCase__ : str = 2
        UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
        UpperCAmelCase__ : str = model(
            _A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def lowercase_ ( self : Tuple ):
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        UpperCAmelCase__ : int = self.prepare_config_and_inputs()
        ((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
        UpperCAmelCase__ : List[Any] = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
    """Common model tests for the TF LayoutLMv3 family.

    NOTE(review): name-mangling damage throughout this class --
    * the five class attributes below all share the name ``lowerCAmelCase__``
      (presumably ``all_model_classes``, ``pipeline_model_mapping`` and three
      boolean ``test_*`` flags), so only the last binding survives;
    * several methods repeat the parameter name ``_A`` (a SyntaxError in
      Python) while their bodies read the intended names (``inputs_dict``,
      ``model_class``, ``return_labels``) that those parameters were meant to
      bind;
    * ``tf.intaa`` does not exist -- presumably ``tf.int32``;
    * assignments to ``UpperCAmelCase__`` discard values the following lines
      read (e.g. ``result``, ``config``).
    Restore names from the upstream test file before running.
    """
    # All TF LayoutLMv3 architectures exercised by the common tests.
    lowerCAmelCase__ = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-test mapping: task name -> model class.
    lowerCAmelCase__ = (
        {'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # Common-test feature flags (all disabled).
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    lowerCAmelCase__ = False
    def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
        """Always skip pipeline tests for this model family."""
        return True
    def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
        """Deep-copy the inputs and, when ``return_labels`` is set, add dummy
        label tensors of the right shape for the given model class.

        NOTE(review): each ``get_values(_A)`` originally received a model
        mapping constant (multiple-choice / seq-cls / QA / token-cls); those
        constants were lost in mangling.
        """
        UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
        if model_class in get_values(_A ):
            UpperCAmelCase__ : Tuple = {
                k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(_A , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(_A ):
                # NOTE(review): ``tf.intaa`` is presumably ``tf.int32`` (here and below).
                UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(_A ):
                UpperCAmelCase__ : int = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    def lowercase_ ( self : List[str] ):
        """Create the model tester and the config tester used by every test."""
        UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
        UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
    def lowercase_ ( self : str ):
        """Run the shared configuration sanity checks."""
        self.config_tester.run_common_tests()
    def lowercase_ ( self : Optional[int] ):
        """Check loss computation via kwargs, masked labels, dict and tuple inputs."""
        UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase__ : Optional[Any] = model_class(_A )
            if getattr(_A , '''hf_compute_loss''' , _A ):
                # The number of elements in the loss should be the same as the number of elements in the label
                UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : List[Any] = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
                ]
                UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
                UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
                if "labels" in prepared_for_class:
                    UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        # -100 is the conventional ignore-index for HF losses.
                        UpperCAmelCase__ : Any = -100
                        UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
                        UpperCAmelCase__ : int = model(_A , **_A )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                UpperCAmelCase__ : Dict = model(_A )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
                # Get keys that were added with the _prepare_for_class function
                UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
                UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
                UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                UpperCAmelCase__ : Dict = {0: '''input_ids'''}
                for label_key in label_keys:
                    UpperCAmelCase__ : str = signature_names.index(_A )
                    UpperCAmelCase__ : List[Any] = label_key
                UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                UpperCAmelCase__ : Tuple = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    UpperCAmelCase__ : Any = prepared_for_class[value]
                UpperCAmelCase__ : Tuple = tuple(_A )
                # Send to model
                UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def lowercase_ ( self : int ):
        """Exercise the base model over the tester's standard inputs.

        NOTE(review): this multi-line unpack (eight identical targets with a
        ``: List[Any]`` annotation) is not valid Python -- the original
        unpacked the 8-tuple from ``prepare_config_and_inputs``.
        """
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
    def lowercase_ ( self : Tuple ):
        """Re-run the base-model check for every position-embedding type."""
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase__ : Union[str, Any] = type
            self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
    def lowercase_ ( self : List[str] ):
        """Exercise the sequence-classification head."""
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            _A , _A , _A , _A , _A , _A , _A )
    def lowercase_ ( self : Any ):
        """Exercise the token-classification head."""
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            _A , _A , _A , _A , _A , _A , _A )
    def lowercase_ ( self : Optional[int] ):
        """Exercise the question-answering head."""
        (
            (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) , (
                UpperCAmelCase__
            ) ,
        ) : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            _A , _A , _A , _A , _A , _A , _A )
    @slow
    def lowercase_ ( self : List[Any] ):
        """Smoke-test loading the first published checkpoint from the Hub."""
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
def a__ ( ):
    """Load the COCO fixture image used by the slow integration test below.

    NOTE(review): originally the opened image was assigned to a throwaway
    name while ``return image`` read an unbound variable, and the return
    annotation claimed ``List[str]``; both fixed.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image


# Restore the name the integration test below actually calls.
prepare_img = a__
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test: run pretrained ``microsoft/layoutlmv3-base`` on a
    fixture image and compare a slice of activations against reference values.

    NOTE(review): every ``_A`` below reads an undefined module-level name --
    the originals were concrete values/locals (``apply_ocr=False``, the
    prepared inputs, ``training=False``, the expected shape/slice) lost in
    mangling.
    """
    @cached_property
    def lowercase_ ( self : Dict ):
        """Image processor for the test (OCR presumably disabled -- boxes are
        supplied manually below); ``None`` when vision deps are missing."""
        return LayoutLMvaImageProcessor(apply_ocr=_A ) if is_vision_available() else None
    @slow
    def lowercase_ ( self : int ):
        """Forward pass on the COCO image; check output shape and sample logits."""
        UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
        UpperCAmelCase__ : Dict = self.default_image_processor
        UpperCAmelCase__ : Any = prepare_img()
        UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
        # Minimal hand-built text input: two token ids with one bbox each.
        UpperCAmelCase__ : str = tf.constant([[1, 2]] )
        UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
        # forward pass
        UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
        # verify the logits
        UpperCAmelCase__ : Optional[int] = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , _A )
        UpperCAmelCase__ : Dict = tf.constant(
            [[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
| 75 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase = logging.get_logger(__name__)
# FIX(review): ``__init__`` below calls ``logger.info`` but the logger was
# bound only to ``lowercase`` (which the next statement rebinds); restore the
# name actually referenced.
logger = lowercase

# Map of pretrained DETA checkpoints to their hosted config files.
lowercase = {
    '''ut/deta''': '''https://huggingface.co/ut/deta/resolve/main/config.json''',
}
class __lowerCamelCase ( __a ):
    """Configuration for the DETA model (mangled copy of ``DetaConfig``).

    NOTE(review): the ``__init__`` signature originally repeated ``a__`` for
    every parameter (duplicate argument names are a SyntaxError) while the
    body read the real names; the names and defaults below are restored from
    the upstream ``DetaConfig``. The two properties and ``to_dict`` all share
    the mangled name ``a_`` and shadow each other -- restore
    ``num_attention_heads`` / ``hidden_size`` / ``to_dict`` before use.
    """

    snake_case__ : Optional[Any] = '''deta'''
    snake_case__ : List[Any] = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        """Build the config; ``backbone_config`` may be None (default ResNet),
        a plain dict, or an already-instantiated backbone config."""
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING['''resnet'''](out_features=["stage2", "stage3", "stage4"] )
        else:
            if isinstance(backbone_config, dict ):
                backbone_model_type = backbone_config.pop("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True." )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def a_ ( self ):
        """Alias for the common-config attribute map (``num_attention_heads``)."""
        return self.encoder_attention_heads

    @property
    def a_ ( self ):
        """Alias for the common-config attribute map (``hidden_size``)."""
        return self.d_model

    def a_ ( self ):
        """Serialize to a plain dict, expanding the nested backbone config.

        NOTE(review): the mangled original discarded these values into local
        names; dict-key assignments restored from upstream ``DetaConfig.to_dict``.
        """
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 211 |
'''Google the query given on the command line (or typed at a prompt) and open
the first search result in the default web browser.'''
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # FIX(review): the package is ``bs4``, not ``bsa``
from fake_useragent import UserAgent

if __name__ == "__main__":
    # NOTE(review): the original bound every value to one reused mangled name
    # while later lines read ``query``/``res``/``link``; real names restored.
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = F"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        # Modern desktop result layout: first organic result container.
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        # Fallback layout: the anchor href is a redirect carrying the real
        # target in its ``url=`` query parameter.
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
| 75 | 0 |
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
_UpperCamelCase = logging.get_logger(__name__)
# FIX(review): the helpers below call ``logger.warning`` but the logger was
# bound only to the mangled name; restore the name they reference.
logger = _UpperCamelCase
def lowerCAmelCase__( artifact_path , targets ):
    """Extract the warning messages matching ``targets`` from one downloaded
    CI artifact.

    ``artifact_path`` is either a directory (GitHub-Actions download, when the
    module-level ``from_gh`` flag set in ``__main__`` is truthy) or a ``.zip``
    archive; in both cases only a ``warnings.txt`` member is parsed.

    NOTE(review): the original signature repeated the parameter name
    ``lowercase`` (a SyntaxError); names restored from the call sites.
    """
    selected_warnings = set()
    buffer = []

    def parse_line(fp ):
        # Accumulate indented continuation lines into ``buffer``; a
        # non-indented line terminates the current warning.
        for line in fp:
            if isinstance(line , bytes ):
                line = line.decode("UTF-8" )
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" " ):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer ) > 0:
                    warning = '''\n'''.join(buffer )
                    # Only keep the warnings specified in `targets`
                    if any(f""": {x}: """ in warning for x in targets ):
                        selected_warnings.add(warning )
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line )

    if from_gh:
        for filename in os.listdir(artifact_path ):
            file_path = os.path.join(artifact_path , filename )
            if not os.path.isdir(file_path ):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path ) as fp:
                    parse_line(fp )
    else:
        try:
            with zipfile.ZipFile(artifact_path ) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename ):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename ) as fp:
                            parse_line(fp )
        except Exception:
            logger.warning(
                f"""{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.""" )
    return selected_warnings


# Restore the name the aggregation helper below actually calls.
extract_warnings_from_single_artifact = lowerCAmelCase__
def lowerCAmelCase__( artifact_dir , targets ):
    """Aggregate the target warnings found across every artifact in
    ``artifact_dir`` (zip archives, or any entry when ``from_gh`` is set).

    NOTE(review): parameter names restored -- the originals were both
    ``lowercase`` (a SyntaxError).
    """
    selected_warnings = set()
    paths = [os.path.join(artifact_dir , p ) for p in os.listdir(artifact_dir ) if (p.endswith(".zip" ) or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p , targets ) )
    return selected_warnings


# Restore the name the ``__main__`` block actually calls.
extract_warnings = lowerCAmelCase__
if __name__ == "__main__":

    def list_str(values ):
        """Split a comma-separated CLI value into a list.

        FIX(review): the def was mangled (wrong name, wrong parameter) while
        ``--targets`` below references ``list_str`` and the body read ``values``.
        """
        return values.split("," )

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
    parser.add_argument(
        '''--output_dir''',
        type=str,
        required=True,
        help='''Where to store the downloaded artifacts and other result files.''',
    )
    parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
    # optional parameters
    parser.add_argument(
        '''--targets''',
        default='''DeprecationWarning,UserWarning,FutureWarning''',
        type=list_str,
        help='''Comma-separated list of target warning(s) which we want to extract.''',
    )
    parser.add_argument(
        '''--from_gh''',
        action='''store_true''',
        help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
    )

    args = parser.parse_args()
    from_gh = args.from_gh

    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)
        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)
        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print('''=''' * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    # NOTE(review): ``extract_warnings`` is the aggregation helper defined
    # above under its mangled name -- confirm the binding exists.
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
    with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 243 |
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def a__ ( img , pta , ptb , rows , cols ) -> np.ndarray:
    """Apply the affine transform mapping the triangle ``pta`` onto ``ptb`` to
    ``img``, producing a ``rows`` x ``cols`` output.

    NOTE(review): the original signature repeated one parameter name five
    times (a SyntaxError); names reconstructed from the call sites below.
    """
    rotation_matrix = cva.getAffineTransform(pta , ptb )
    return cva.warpAffine(img , rotation_matrix , (rows, cols) )


# Restore the name the ``__main__`` demo below actually calls.
get_rotation = a__
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    # NOTE(review): the four point sets were mangled to one reused name and
    # ``np.floataa`` is not a dtype (presumably ``np.float32``); the distinct
    # names and the source/destination pairings below are reconstructed --
    # confirm against the upstream demo.
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list (``a__`` is the rotation helper above)
    images = [
        gray_img,
        a__(gray_img, pts1, pts2, img_rows, img_cols),
        a__(gray_img, pts2, pts3, img_rows, img_cols),
        a__(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
        plt.title(titles[i])
        plt.axis('''off''')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 75 | 0 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class _snake_case :
    """Directed weighted graph stored as an adjacency dict
    ``{node: [[weight, neighbour], ...]}``.

    NOTE(review): this class is heavily name-mangled -- every public method is
    named ``lowerCamelCase__`` (so each def shadows the previous one),
    parameter lists repeat ``UpperCAmelCase`` (a SyntaxError in Python),
    bodies reference the intended parameter names (``u``, ``v``, ``w``, ``s``,
    ``d``, ``c``) plus an undefined ``_A``, and assignments that should target
    ``self.graph`` or locals were rewritten to annotated ``__lowerCamelCase``
    bindings. The docstrings below describe the *intended* behaviour of each
    method; restore the original identifiers before running this code.
    """
    def __init__( self : Tuple ):
        # Intended: self.graph = {}
        __lowerCamelCase : Dict = {}
    def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str]=1 ):
        """Intended ``add_pair(u, v, w=1)``: add edge u -> v (weight w), once."""
        if self.graph.get(_A ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            __lowerCamelCase : Tuple = [[w, v]]
        if not self.graph.get(_A ):
            __lowerCamelCase : List[Any] = []
    def lowerCamelCase__ ( self : Dict ):
        """Intended ``all_nodes()``: return every vertex."""
        return list(self.graph )
    def lowerCamelCase__ ( self : int , UpperCAmelCase : List[str] , UpperCAmelCase : str ):
        """Intended ``remove_pair(u, v)``: delete edge u -> v if present."""
        if self.graph.get(_A ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_A )
    def lowerCamelCase__ ( self : Dict , UpperCAmelCase : List[Any]=-2 , UpperCAmelCase : Optional[int]=-1 ):
        """Intended ``dfs(s=-2, d=-1)``: iterative DFS from ``s`` (default:
        first vertex); stop early when destination ``d`` is found and return
        the visit order."""
        if s == d:
            return []
        __lowerCamelCase : Union[str, Any] = []
        __lowerCamelCase : int = []
        if s == -2:
            __lowerCamelCase : Tuple = list(self.graph )[0]
        stack.append(_A )
        visited.append(_A )
        __lowerCamelCase : List[Any] = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __lowerCamelCase : Optional[int] = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(_A )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            __lowerCamelCase : str = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(_A ) != 0:
                    __lowerCamelCase : Dict = stack[len(_A ) - 1]
            else:
                __lowerCamelCase : str = ss
            # check if se have reached the starting point
            if len(_A ) == 0:
                return visited
    def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : int=-1 ):
        """Intended ``fill_graph_randomly(c=-1)``: populate with random edges
        (random vertex count when ``c`` is -1)."""
        if c == -1:
            __lowerCamelCase : Tuple = floor(random() * 10000 ) + 10
        for i in range(_A ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                __lowerCamelCase : Union[str, Any] = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(_A , _A , 1 )
    def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[str]=-2 ):
        """Intended ``bfs(s=-2)``: breadth-first traversal returning the visit order."""
        __lowerCamelCase : int = deque()
        __lowerCamelCase : Optional[int] = []
        if s == -2:
            __lowerCamelCase : Union[str, Any] = list(self.graph )[0]
        d.append(_A )
        visited.append(_A )
        while d:
            __lowerCamelCase : Dict = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def lowerCamelCase__ ( self : int , UpperCAmelCase : Union[str, Any] ):
        """Intended ``in_degree(u)``: count edges ending at ``u``."""
        __lowerCamelCase : List[str] = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Union[str, Any] ):
        """Intended ``out_degree(u)``: number of edges leaving ``u``."""
        return len(self.graph[u] )
    def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Optional[Any]=-2 ):
        """Intended ``topological_sort(s=-2)``: DFS-based ordering where each
        node is emitted after all of its descendants."""
        __lowerCamelCase : Optional[Any] = []
        __lowerCamelCase : List[Any] = []
        if s == -2:
            __lowerCamelCase : List[str] = list(self.graph )[0]
        stack.append(_A )
        visited.append(_A )
        __lowerCamelCase : Any = s
        __lowerCamelCase : Any = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __lowerCamelCase : Dict = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __lowerCamelCase : Optional[int] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop() )
                if len(_A ) != 0:
                    __lowerCamelCase : Union[str, Any] = stack[len(_A ) - 1]
            else:
                __lowerCamelCase : Union[str, Any] = ss
            # check if se have reached the starting point
            if len(_A ) == 0:
                return sorted_nodes
    def lowerCamelCase__ ( self : Optional[Any] ):
        """Intended ``cycle_nodes()``: return the set of nodes that lie on a cycle."""
        __lowerCamelCase : Any = []
        __lowerCamelCase : List[str] = []
        __lowerCamelCase : Union[str, Any] = list(self.graph )[0]
        stack.append(_A )
        visited.append(_A )
        __lowerCamelCase : int = -2
        __lowerCamelCase : List[str] = []
        __lowerCamelCase : Optional[int] = s
        __lowerCamelCase : Any = False
        __lowerCamelCase : int = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __lowerCamelCase : Tuple = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        __lowerCamelCase : Dict = len(_A ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __lowerCamelCase : Optional[int] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __lowerCamelCase : Union[str, Any] = True
                if len(_A ) != 0:
                    __lowerCamelCase : List[str] = stack[len(_A ) - 1]
            else:
                __lowerCamelCase : Optional[Any] = False
                indirect_parents.append(_A )
                __lowerCamelCase : Tuple = s
                __lowerCamelCase : Tuple = ss
            # check if se have reached the starting point
            if len(_A ) == 0:
                return list(_A )
    def lowerCamelCase__ ( self : Union[str, Any] ):
        """Intended ``has_cycle()``: True when a back edge exists (same walk as
        ``cycle_nodes`` but returns as soon as a cycle is detected)."""
        __lowerCamelCase : Optional[Any] = []
        __lowerCamelCase : Tuple = []
        __lowerCamelCase : Dict = list(self.graph )[0]
        stack.append(_A )
        visited.append(_A )
        __lowerCamelCase : Union[str, Any] = -2
        __lowerCamelCase : Optional[Any] = []
        __lowerCamelCase : List[Any] = s
        __lowerCamelCase : str = False
        __lowerCamelCase : str = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __lowerCamelCase : str = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        __lowerCamelCase : List[str] = len(_A ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __lowerCamelCase : Optional[int] = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __lowerCamelCase : Union[str, Any] = True
                if len(_A ) != 0:
                    __lowerCamelCase : int = stack[len(_A ) - 1]
            else:
                __lowerCamelCase : Dict = False
                indirect_parents.append(_A )
                __lowerCamelCase : int = s
                __lowerCamelCase : Optional[int] = ss
            # check if se have reached the starting point
            if len(_A ) == 0:
                return False
    def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Any=-2 , UpperCAmelCase : List[str]=-1 ):
        """Intended ``dfs_time(s=-2, e=-1)``: wall-clock seconds for one DFS."""
        __lowerCamelCase : Any = time()
        self.dfs(_A , _A )
        __lowerCamelCase : Union[str, Any] = time()
        return end - begin
    def lowerCamelCase__ ( self : Union[str, Any] , UpperCAmelCase : Optional[int]=-2 ):
        """Intended ``bfs_time(s=-2)``: wall-clock seconds for one BFS."""
        __lowerCamelCase : Dict = time()
        self.bfs(_A )
        __lowerCamelCase : Any = time()
        return end - begin
class _snake_case :
    """Undirected weighted graph: every ``add_pair`` inserts both u -> v and
    v -> u into the adjacency dict ``{node: [[weight, neighbour], ...]}``.

    NOTE(review): same mangling damage as the directed class above -- all
    methods share the name ``lowerCamelCase__``, parameter lists repeat
    ``UpperCAmelCase`` (a SyntaxError), bodies reference the intended names
    plus an undefined ``_A``, and ``self.graph``/local assignments were
    rewritten to annotated ``__lowerCamelCase`` bindings. Docstrings describe
    the *intended* behaviour.
    """
    def __init__( self : List[str] ):
        # Intended: self.graph = {}
        __lowerCamelCase : Optional[int] = {}
    def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Any=1 ):
        """Intended ``add_pair(u, v, w=1)``: add the undirected edge u <-> v."""
        if self.graph.get(_A ):
            # if there already is a edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            __lowerCamelCase : Any = [[w, v]]
        # add the other way
        if self.graph.get(_A ):
            # if there already is a edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if u does not exist
            __lowerCamelCase : List[str] = [[w, u]]
    def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict ):
        """Intended ``remove_pair(u, v)``: delete the edge in both directions."""
        if self.graph.get(_A ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_A )
        # the other way round
        if self.graph.get(_A ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_A )
    def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : int=-2 , UpperCAmelCase : Optional[int]=-1 ):
        """Intended ``dfs(s=-2, d=-1)``: iterative DFS with early exit at ``d``."""
        if s == d:
            return []
        __lowerCamelCase : int = []
        __lowerCamelCase : Dict = []
        if s == -2:
            __lowerCamelCase : Optional[Any] = list(self.graph )[0]
        stack.append(_A )
        visited.append(_A )
        __lowerCamelCase : Optional[Any] = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __lowerCamelCase : Tuple = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(_A )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            __lowerCamelCase : List[Any] = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(_A ) != 0:
                    __lowerCamelCase : str = stack[len(_A ) - 1]
            else:
                __lowerCamelCase : Optional[Any] = ss
            # check if se have reached the starting point
            if len(_A ) == 0:
                return visited
    def lowerCamelCase__ ( self : str , UpperCAmelCase : Tuple=-1 ):
        """Intended ``fill_graph_randomly(c=-1)``: populate with random edges."""
        if c == -1:
            __lowerCamelCase : str = floor(random() * 10000 ) + 10
        for i in range(_A ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                __lowerCamelCase : List[str] = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(_A , _A , 1 )
    def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[str]=-2 ):
        """Intended ``bfs(s=-2)``: breadth-first traversal returning visit order."""
        __lowerCamelCase : Optional[Any] = deque()
        __lowerCamelCase : Optional[int] = []
        if s == -2:
            __lowerCamelCase : int = list(self.graph )[0]
        d.append(_A )
        visited.append(_A )
        while d:
            __lowerCamelCase : Dict = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : int ):
        """Intended ``degree(u)``: number of edges incident to ``u``."""
        return len(self.graph[u] )
    def lowerCamelCase__ ( self : str ):
        """Intended ``cycle_nodes()``: nodes lying on a cycle (DFS back-edge scan)."""
        __lowerCamelCase : Dict = []
        __lowerCamelCase : str = []
        __lowerCamelCase : Any = list(self.graph )[0]
        stack.append(_A )
        visited.append(_A )
        __lowerCamelCase : Union[str, Any] = -2
        __lowerCamelCase : Union[str, Any] = []
        __lowerCamelCase : List[Any] = s
        __lowerCamelCase : Any = False
        __lowerCamelCase : Any = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __lowerCamelCase : int = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        __lowerCamelCase : Tuple = len(_A ) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack] )
                                len_stack -= 1
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __lowerCamelCase : Any = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __lowerCamelCase : Dict = True
                if len(_A ) != 0:
                    __lowerCamelCase : Optional[int] = stack[len(_A ) - 1]
            else:
                __lowerCamelCase : Optional[int] = False
                indirect_parents.append(_A )
                __lowerCamelCase : Optional[int] = s
                __lowerCamelCase : Dict = ss
            # check if se have reached the starting point
            if len(_A ) == 0:
                return list(_A )
    def lowerCamelCase__ ( self : str ):
        """Intended ``has_cycle()``: True as soon as a back edge is found."""
        __lowerCamelCase : List[Any] = []
        __lowerCamelCase : str = []
        __lowerCamelCase : Any = list(self.graph )[0]
        stack.append(_A )
        visited.append(_A )
        __lowerCamelCase : Dict = -2
        __lowerCamelCase : Optional[int] = []
        __lowerCamelCase : Optional[Any] = s
        __lowerCamelCase : Optional[Any] = False
        __lowerCamelCase : Union[str, Any] = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s] ) != 0:
                __lowerCamelCase : List[str] = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1] ) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1] ) > 0
                        and not on_the_way_back
                    ):
                        __lowerCamelCase : Union[str, Any] = len(_A ) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1] )
                                break
                            else:
                                return True
                    if visited.count(node[1] ) < 1:
                        stack.append(node[1] )
                        visited.append(node[1] )
                        __lowerCamelCase : Any = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                __lowerCamelCase : List[str] = True
                if len(_A ) != 0:
                    __lowerCamelCase : Any = stack[len(_A ) - 1]
            else:
                __lowerCamelCase : Optional[Any] = False
                indirect_parents.append(_A )
                __lowerCamelCase : List[str] = s
                __lowerCamelCase : Optional[Any] = ss
            # check if se have reached the starting point
            if len(_A ) == 0:
                return False
    def lowerCamelCase__ ( self : int ):
        """Intended ``all_nodes()``: return every vertex."""
        return list(self.graph )
    def lowerCamelCase__ ( self : List[Any] , UpperCAmelCase : Any=-2 , UpperCAmelCase : List[str]=-1 ):
        """Intended ``dfs_time(s=-2, e=-1)``: wall-clock seconds for one DFS."""
        __lowerCamelCase : Union[str, Any] = time()
        self.dfs(_A , _A )
        __lowerCamelCase : Tuple = time()
        return end - begin
    def lowerCamelCase__ ( self : int , UpperCAmelCase : str=-2 ):
        """Intended ``bfs_time(s=-2)``: wall-clock seconds for one BFS."""
        __lowerCamelCase : Tuple = time()
        self.bfs(_A )
        __lowerCamelCase : int = time()
        return end - begin
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
# Issues carrying any of these labels (compared lower-cased) are exempt from
# the stale-bot sweep implemented below.
UpperCamelCase__ = [
    '''good first issue''',
    '''good second issue''',
    '''good difficult issue''',
    '''feature request''',
    '''new model''',
    '''wip''',
]
def a__ ( ):
    """Sweep open issues on huggingface/transformers: close issues that stayed
    inactive after the bot's stale warning, and post the warning on issues
    inactive for 23+ days.  Requires a ``GITHUB_TOKEN`` environment variable.

    NOTE(review): the original bound every intermediate to a throwaway local
    (so ``g``/``repo``/``open_issues``/``comments`` were undefined), the sort
    lambda read ``i`` while its parameter had another name, and ``reverse=``
    received an undefined name — all fixed here.
    """
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/transformers''' )
    open_issues = repo.get_issues(state='''open''' )

    for issue in open_issues:
        # Most recent comment first.
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
        # ``UpperCamelCase__`` is the exempt-labels list defined above.
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in UpperCamelCase__ for label in issue.get_labels() )
        ):
            # Bot already warned and a week passed with no activity: close.
            issue.edit(state='''closed''' )
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in UpperCamelCase__ for label in issue.get_labels() )
        ):
            # Inactive long enough: post the stale notice.
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
if __name__ == "__main__":
    # NOTE(review): the original called an undefined ``main()``; the entry
    # point defined above is ``a__``.
    a__()
| 75 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __a ):
    """Unit tests for ``DDPMScheduler`` (a ``SchedulerCommonTest`` subclass).

    NOTE(review): obfuscation damage throughout — every intermediate is bound
    to the throwaway local ``_UpperCAmelCase`` while the following lines read
    the intended names (``config``, ``scheduler``, ``model``, ``sample`` …),
    and ``_A`` is never defined.  The bodies are left byte-identical; only
    documentation was added.
    """

    # Scheduler class(es) exercised by the common-test helpers.
    UpperCamelCase_ : int = (DDPMScheduler,)

    def _lowerCAmelCase ( self : Optional[int] , **lowerCAmelCase__ : List[str] ) -> int:
        """Return a default DDPM scheduler config; kwargs override entries."""
        _UpperCAmelCase : List[str] = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''variance_type''': '''fixed_small''',
            '''clip_sample''': True,
        }
        # NOTE(review): ``config``/``_A`` are undefined — presumably the dict
        # above was meant to be named ``config`` and updated with the kwargs.
        config.update(**_A )
        return config

    def _lowerCAmelCase ( self : str ) -> str:
        """Exercise the config check across several training lengths."""
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=_A )

    def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
        """Exercise the config check across several beta ranges."""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=_A , beta_end=_A )

    def _lowerCAmelCase ( self : Dict ) -> str:
        """Exercise the config check across supported beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=_A )

    def _lowerCAmelCase ( self : Tuple ) -> str:
        """Exercise the config check across variance types."""
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=_A )

    def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        """Exercise the config check with sample clipping on and off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=_A )

    def _lowerCAmelCase ( self : Any ) -> Any:
        """Exercise thresholding combinations with each prediction type."""
        self.check_over_configs(thresholding=_A )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=_A , prediction_type=_A , sample_max_value=_A , )

    def _lowerCAmelCase ( self : Any ) -> List[str]:
        """Exercise the config check across prediction types."""
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=_A )

    def _lowerCAmelCase ( self : Any ) -> str:
        """Exercise the forward check at boundary and middle timesteps."""
        for t in [0, 5_0_0, 9_9_9]:
            self.check_over_forward(time_step=_A )

    def _lowerCAmelCase ( self : Optional[int] ) -> str:
        """Check the internal variance values at three reference timesteps."""
        _UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        _UpperCAmelCase : int = self.get_scheduler_config()
        _UpperCAmelCase : Dict = scheduler_class(**_A )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_0979 ) ) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5

    def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
        """Run a full reverse-diffusion loop and check sum/mean of the result."""
        _UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : str = self.get_scheduler_config()
        _UpperCAmelCase : Any = scheduler_class(**_A )
        _UpperCAmelCase : List[str] = len(_A )
        _UpperCAmelCase : Tuple = self.dummy_model()
        _UpperCAmelCase : List[str] = self.dummy_sample_deter
        _UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
        for t in reversed(range(_A ) ):
            # 1. predict noise residual
            _UpperCAmelCase : int = model(_A , _A )
            # 2. predict previous mean of sample x_t-1
            _UpperCAmelCase : List[Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _UpperCAmelCase : Dict = pred_prev_sample
        _UpperCAmelCase : List[Any] = torch.sum(torch.abs(_A ) )
        _UpperCAmelCase : List[Any] = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
        assert abs(result_mean.item() - 0.3372 ) < 1e-3

    def _lowerCAmelCase ( self : Any ) -> Tuple:
        """Same full-loop check with the v_prediction parameterization."""
        _UpperCAmelCase : Tuple = self.scheduler_classes[0]
        _UpperCAmelCase : List[str] = self.get_scheduler_config(prediction_type="v_prediction" )
        _UpperCAmelCase : Tuple = scheduler_class(**_A )
        _UpperCAmelCase : Optional[Any] = len(_A )
        _UpperCAmelCase : Any = self.dummy_model()
        _UpperCAmelCase : Union[str, Any] = self.dummy_sample_deter
        _UpperCAmelCase : Tuple = torch.manual_seed(0 )
        for t in reversed(range(_A ) ):
            # 1. predict noise residual
            _UpperCAmelCase : int = model(_A , _A )
            # 2. predict previous mean of sample x_t-1
            _UpperCAmelCase : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            _UpperCAmelCase : Dict = pred_prev_sample
        _UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(_A ) )
        _UpperCAmelCase : List[str] = torch.mean(torch.abs(_A ) )
        assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
        assert abs(result_mean.item() - 0.2631 ) < 1e-3

    def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
        """Custom timestep list: previous_timestep must follow the list order."""
        _UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : Dict = self.get_scheduler_config()
        _UpperCAmelCase : Dict = scheduler_class(**_A )
        _UpperCAmelCase : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
        scheduler.set_timesteps(timesteps=_A )
        _UpperCAmelCase : Tuple = scheduler.timesteps
        for i, timestep in enumerate(_A ):
            if i == len(_A ) - 1:
                _UpperCAmelCase : Optional[int] = -1
            else:
                _UpperCAmelCase : Union[str, Any] = timesteps[i + 1]
            _UpperCAmelCase : str = scheduler.previous_timestep(_A )
            _UpperCAmelCase : List[Any] = prev_t.item()
            self.assertEqual(_A , _A )

    def _lowerCAmelCase ( self : str ) -> Union[str, Any]:
        """Non-descending custom timesteps must be rejected."""
        _UpperCAmelCase : Any = self.scheduler_classes[0]
        _UpperCAmelCase : Any = self.get_scheduler_config()
        _UpperCAmelCase : Dict = scheduler_class(**_A )
        _UpperCAmelCase : int = [1_0_0, 8_7, 5_0, 5_1, 0]
        with self.assertRaises(_A , msg="`custom_timesteps` must be in descending order." ):
            scheduler.set_timesteps(timesteps=_A )

    def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
        """num_inference_steps and custom timesteps are mutually exclusive."""
        _UpperCAmelCase : List[Any] = self.scheduler_classes[0]
        _UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
        _UpperCAmelCase : Union[str, Any] = scheduler_class(**_A )
        _UpperCAmelCase : Dict = [1_0_0, 8_7, 5_0, 1, 0]
        _UpperCAmelCase : List[Any] = len(_A )
        with self.assertRaises(_A , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
            scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )

    def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
        """Timesteps at or above num_train_timesteps must be rejected."""
        # NOTE(review): the msg string below lacks an f-prefix and has a stray
        # closing brace — confirm against the upstream test before relying on it.
        _UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
        _UpperCAmelCase : List[str] = self.get_scheduler_config()
        _UpperCAmelCase : List[str] = scheduler_class(**_A )
        _UpperCAmelCase : Dict = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            _A , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
            scheduler.set_timesteps(timesteps=_A )
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
# Module-level logger for the pipeline below (read as ``logger`` in __call__).
UpperCamelCase__ = logging.get_logger(__name__)  # pylint: disable=invalid-name
class lowerCamelCase_ ( __a ):
    """Unconditional audio-generation diffusion pipeline (UNet + scheduler).

    NOTE(review): obfuscation damage — both ``__init__`` and ``__call__``
    declare several parameters all named ``_A`` (a SyntaxError), and every
    intermediate is bound to the throwaway ``UpperCAmelCase__`` while later
    lines read the intended names (``audio_length_in_s``, ``sample_size``,
    ``down_scale_factor``, ``audio``, …).  Left byte-identical; docs only.
    """

    def __init__( self : Dict , _A : List[str] , _A : int ):
        """Register the denoising UNet and the noise scheduler."""
        super().__init__()
        self.register_modules(unet=_A , scheduler=_A )

    @torch.no_grad()
    def __call__( self : List[Any] , _A : int = 1 , _A : int = 100 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[float] = None , _A : bool = True , ):
        """Generate audio by iteratively denoising random noise.

        Returns an ``AudioPipelineOutput`` (or a plain tuple when the
        return-dict flag is False).
        """
        # Default the requested duration to the model's native sample length.
        if audio_length_in_s is None:
            UpperCAmelCase__ : List[str] = self.unet.config.sample_size / self.unet.config.sample_rate
        UpperCAmelCase__ : Union[str, Any] = audio_length_in_s * self.unet.config.sample_rate
        # Each up-block halves/doubles the resolution once.
        UpperCAmelCase__ : List[Any] = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
        UpperCAmelCase__ : List[Any] = int(_A )
        # Pad the sample length up to a multiple the UNet can downscale cleanly.
        if sample_size % down_scale_factor != 0:
            UpperCAmelCase__ : int = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                ''' process.''' )
        UpperCAmelCase__ : Dict = int(_A )
        UpperCAmelCase__ : Optional[Any] = next(iter(self.unet.parameters() ) ).dtype
        UpperCAmelCase__ : int = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(_A , _A ) and len(_A ) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(_A )}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        UpperCAmelCase__ : Optional[int] = randn_tensor(_A , generator=_A , device=self.device , dtype=_A )
        # set step values
        self.scheduler.set_timesteps(_A , device=audio.device )
        UpperCAmelCase__ : List[str] = self.scheduler.timesteps.to(_A )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            UpperCAmelCase__ : Optional[int] = self.unet(_A , _A ).sample
            # 2. compute previous image: x_t -> t_t-1
            UpperCAmelCase__ : List[Any] = self.scheduler.step(_A , _A , _A ).prev_sample
        UpperCAmelCase__ : Any = audio.clamp(-1 , 1 ).float().cpu().numpy()
        # Trim the padding added above before returning.
        UpperCAmelCase__ : Any = audio[:, :, :original_sample_size]
        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=_A )
| 75 | 0 |
"""simple docstring"""
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class UpperCAmelCase_ ( nn.Module ):
    """Tiny 3 -> 4 -> 5 MLP with a batch norm, used as a fixture by the offload
    tests below.

    Attribute names (``linear1`` / ``batchnorm`` / ``linear2``) match the
    state-dict keys those tests expect (e.g. ``"linear1.weight"``).

    NOTE(review): the original bound the layers to a throwaway local instead of
    ``self`` attributes, referenced the nonexistent ``nn.BatchNormad``, and read
    the undefined ``_A`` in the forward pass — all fixed here.
    """

    def __init__( self ):
        super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )

    def snake_case_ ( self , A ):
        # Forward pass: linear1 -> batchnorm -> linear2.
        return self.linear2(self.batchnorm(self.linear1(A ) ) )
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for accelerate's weight-offloading helpers.

    NOTE(review): obfuscation damage — every intermediate is bound to the
    throwaway ``_UpperCAmelCase`` while later lines read the intended names
    (``model``, ``tmp_dir``, ``index_file``, ``state_dict``, ``weight_map`` …),
    ``_A`` is undefined, ``ModelForTest`` does not match the fixture class name
    above, and ``torch.floataa``/``bfloataa`` are not real dtypes.  Bodies are
    left byte-identical; documentation only.
    """

    def snake_case_ ( self : Any ):
        """offload_state_dict must write index.json plus one .dat per tensor."""
        _UpperCAmelCase : Tuple = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , model.state_dict() )
            _UpperCAmelCase : Dict = os.path.join(_A , "index.json" )
            self.assertTrue(os.path.isfile(_A ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                _UpperCAmelCase : Optional[int] = os.path.join(_A , f'{key}.dat' )
                self.assertTrue(os.path.isfile(_A ) )
                # TODO: add tests on the fact weights are properly loaded

    def snake_case_ ( self : int ):
        """offload_weight / load_offloaded_weight must round-trip each dtype."""
        _UpperCAmelCase : Tuple = [torch.floataa, torch.floataa, torch.bfloataa]
        for dtype in dtypes:
            _UpperCAmelCase : List[str] = torch.randn(2 , 3 , dtype=_A )
            with TemporaryDirectory() as tmp_dir:
                _UpperCAmelCase : List[str] = offload_weight(_A , "weight" , _A , {} )
                _UpperCAmelCase : Optional[int] = os.path.join(_A , "weight.dat" )
                self.assertTrue(os.path.isfile(_A ) )
                self.assertDictEqual(_A , {"weight": {"shape": [2, 3], "dtype": str(_A ).split("." )[1]}} )
                _UpperCAmelCase : List[Any] = load_offloaded_weight(_A , index["weight"] )
                self.assertTrue(torch.equal(_A , _A ) )

    def snake_case_ ( self : List[Any] ):
        """OffloadedWeightsLoader must merge in-memory and on-disk weights."""
        _UpperCAmelCase : Union[str, Any] = ModelForTest()
        _UpperCAmelCase : Optional[Any] = model.state_dict()
        _UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if '''linear2''' not in k}
        _UpperCAmelCase : int = {k: v for k, v in state_dict.items() if '''linear2''' in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A )
            _UpperCAmelCase : Dict = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
            # Every key is there with the right value
            self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key] ) )
        _UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if '''weight''' in k}
        _UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if '''weight''' not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A )
            _UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
            # Every key is there with the right value
            self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(_A , _A )
            # Duplicates are removed
            _UpperCAmelCase : int = OffloadedWeightsLoader(state_dict=_A , save_folder=_A )
            # Every key is there with the right value
            self.assertEqual(sorted(_A ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(_A , weight_map[key] ) )

    def snake_case_ ( self : Union[str, Any] ):
        """extract_submodules_state_dict must keep only exact-prefix matches."""
        _UpperCAmelCase : Union[str, Any] = {'''a.1''': 0, '''a.10''': 1, '''a.2''': 2}
        _UpperCAmelCase : int = extract_submodules_state_dict(_A , ["a.1", "a.2"] )
        self.assertDictEqual(_A , {"a.1": 0, "a.2": 2} )
        _UpperCAmelCase : Any = {'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2}
        _UpperCAmelCase : int = extract_submodules_state_dict(_A , ["a.1", "a.2"] )
        self.assertDictEqual(_A , {"a.1.a": 0, "a.2.a": 2} )
| 289 |
'''simple docstring'''
from math import factorial
def a__ ( successes , trials , prob ) -> float:
    """Binomial probability mass: P(X = successes) for X ~ Binomial(trials, prob).

    Raises ValueError for non-integer or negative counts, for
    successes > trials, and for prob outside the open interval (0, 1).

    NOTE(review): the original declared all three parameters with the same
    name (a SyntaxError) while the body read ``successes``/``trials``/``prob``.
    """
    # Validate types first, before the values are used in comparisons.
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('''the function is defined for non-negative integers''' )
    if trials < 0 or successes < 0:
        raise ValueError('''the function is defined for non-negative integers''' )
    if successes > trials:
        raise ValueError('''successes must be lower or equal to trials''' )
    if not 0 < prob < 1:
        raise ValueError('''prob has to be in range of 1 - 0''' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient n! / (k! (n-k)!) — exact integer arithmetic.
    coefficient = factorial(trials ) // (factorial(successes ) * factorial(trials - successes ))
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # NOTE(review): the original called an undefined ``binomial_distribution``;
    # the function defined above is named ``a__``.
    print(a__(2, 4, 0.75))
"""simple docstring"""
def A ( __snake_case ):
    """Stable counting sort of a list of integers; returns a new sorted list.

    Handles negatives via an offset by the minimum value; an empty input
    returns an empty list.

    NOTE(review): the original body read the undefined ``lowerCAmelCase__``
    instead of its actual parameter; fixed here.
    """
    if not __snake_case:
        return []
    # Collection statistics.
    coll_len = len(__snake_case )
    coll_max = max(__snake_case )
    coll_min = min(__snake_case )
    # Histogram of values, offset by the minimum so negatives work too.
    counting_arr = [0] * (coll_max + 1 - coll_min)
    for number in __snake_case:
        counting_arr[number - coll_min] += 1
    # Prefix sums: counting_arr[i] == number of elements <= coll_min + i.
    for i in range(1 , len(counting_arr ) ):
        counting_arr[i] += counting_arr[i - 1]
    # Place elements from the end to keep the sort stable.
    ordered = [0] * coll_len
    for i in reversed(range(coll_len ) ):
        counting_arr[__snake_case[i] - coll_min] -= 1
        ordered[counting_arr[__snake_case[i] - coll_min]] = __snake_case[i]
    return ordered
def A ( __snake_case ):
    """Return the characters of the string sorted ascending, via counting sort
    over their code points.

    NOTE(review): the original delegated to an undefined ``counting_sort``
    sibling and read undefined names; inlined here so the function is
    self-contained.
    """
    if not __snake_case:
        return ""
    codes = [ord(c) for c in __snake_case]
    low = min(codes )
    # Histogram of code points, offset by the smallest one.
    counts = [0] * (max(codes ) - low + 1)
    for code in codes:
        counts[code - low] += 1
    return "".join(chr(low + i ) * freq for i, freq in enumerate(counts ) )
if __name__ == "__main__":
    # Test string sort
    # NOTE(review): ``counting_sort_string`` and ``counting_sort`` are not
    # defined anywhere in this file (both functions above are named ``A``),
    # and ``user_input``/``unsorted`` are never bound (both inputs go to
    # ``snake_case``) — this block raises NameError as written.
    assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
    snake_case : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
    snake_case : str = [int(item) for item in user_input.split(""",""")]
    print(counting_sort(unsorted))
'''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
# Module-level logger for the image processor below.
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( __a ):
    """Image processor: resize, center-crop, rescale and normalize images into
    a ``BatchFeature`` of pixel values.

    NOTE(review): obfuscation damage — every method below declares all of its
    parameters with the single name ``_A`` (a SyntaxError in Python) while the
    bodies read the intended names (``size``, ``crop_size``, ``do_resize`` …),
    and intermediates are bound to the throwaway ``UpperCAmelCase__``.  Code is
    left byte-identical; documentation only.
    """

    # Keys this processor produces in its output feature dict.
    lowerCAmelCase__ = ['pixel_values']

    def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , _A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **_A : int , ):
        """Store the preprocessing configuration (sizes, flags, mean/std)."""
        super().__init__(**_A )
        UpperCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 224}
        UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
        UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
        UpperCAmelCase__ : List[str] = get_size_dict(_A , param_name='''crop_size''' )
        UpperCAmelCase__ : str = do_resize
        UpperCAmelCase__ : List[Any] = size
        UpperCAmelCase__ : int = resample
        UpperCAmelCase__ : int = do_center_crop
        UpperCAmelCase__ : List[str] = crop_size
        UpperCAmelCase__ : Union[str, Any] = do_rescale
        UpperCAmelCase__ : Optional[int] = rescale_factor
        UpperCAmelCase__ : List[Any] = do_normalize
        UpperCAmelCase__ : Dict = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        UpperCAmelCase__ : Dict = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def lowercase_ ( self : str , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ):
        """Resize an image; a "shortest_edge" size is scaled by 256/224 first."""
        UpperCAmelCase__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            UpperCAmelCase__ : Tuple = int((256 / 224) * size['''shortest_edge'''] )
            UpperCAmelCase__ : Tuple = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
            UpperCAmelCase__ : Dict = {'''height''': output_size[0], '''width''': output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
        return resize(
            _A , size=(size_dict['''height'''], size_dict['''width''']) , resample=_A , data_format=_A , **_A )

    def lowercase_ ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
        """Center-crop an image to the given height/width."""
        UpperCAmelCase__ : Optional[Any] = get_size_dict(_A )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
        return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )

    def lowercase_ ( self : List[str] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Dict , ):
        """Rescale pixel values by the given factor (e.g. 1/255)."""
        return rescale(_A , scale=_A , data_format=_A , **_A )

    def lowercase_ ( self : Dict , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Optional[int] , ):
        """Normalize an image with the given per-channel mean and std."""
        return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )

    def lowercase_ ( self : Optional[Any] , _A : ImageInput , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : PILImageResampling = None , _A : Optional[bool] = None , _A : Optional[Dict[str, int]] = None , _A : Optional[bool] = None , _A : Optional[float] = None , _A : Optional[bool] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[Union[float, Iterable[float]]] = None , _A : Optional[TensorType] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ):
        """Full preprocessing pipeline; per-call arguments override defaults."""
        UpperCAmelCase__ : str = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase__ : Optional[int] = resample if resample is not None else self.resample
        UpperCAmelCase__ : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase__ : Tuple = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase__ : Tuple = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase__ : List[str] = image_std if image_std is not None else self.image_std
        UpperCAmelCase__ : Tuple = size if size is not None else self.size
        UpperCAmelCase__ : int = get_size_dict(_A , default_to_square=_A )
        UpperCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase__ : int = get_size_dict(_A , param_name='''crop_size''' )
        UpperCAmelCase__ : Union[str, Any] = make_list_of_images(_A )
        if not valid_images(_A ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        UpperCAmelCase__ : int = [to_numpy_array(_A ) for image in images]
        if do_resize:
            UpperCAmelCase__ : str = [self.resize(_A , _A , _A ) for image in images]
        if do_center_crop:
            UpperCAmelCase__ : Tuple = [self.center_crop(_A , _A ) for image in images]
        if do_rescale:
            UpperCAmelCase__ : Optional[int] = [self.rescale(_A , _A ) for image in images]
        if do_normalize:
            UpperCAmelCase__ : Any = [self.normalize(_A , _A , _A ) for image in images]
        UpperCAmelCase__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
        UpperCAmelCase__ : Dict = {'''pixel_values''': images}
        return BatchFeature(data=_A , tensor_type=_A )
| 75 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the same name is rebound immediately below, so the logger is
# discarded; presumably the second binding was meant to be a pretrained
# config-archive map under a distinct name.
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class __lowerCamelCase ( __a ):
    """Configuration class for the original OpenAI GPT model.

    NOTE(review): obfuscation damage — ``snake_case__`` is bound twice (the
    attribute map clobbers the model-type string), ``__init__`` declares every
    parameter with the same name (a SyntaxError), and the body reads the
    intended names (``vocab_size`` …) while assigning each to the same plain
    local instead of ``self`` attributes.  Left byte-identical; docs only.
    """

    snake_case__ = "openai-gpt"
    snake_case__ = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=40_478 , SCREAMING_SNAKE_CASE__ : List[str]=512 , SCREAMING_SNAKE_CASE__ : Dict=768 , SCREAMING_SNAKE_CASE__ : List[str]=12 , SCREAMING_SNAKE_CASE__ : Dict=12 , SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : List[str]=0.1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE__ : Tuple=1e-5 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.02 , SCREAMING_SNAKE_CASE__ : str="cls_index" , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : Dict=0.1 , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> Any:
        """Record model hyper-parameters and forward extras to the base class."""
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = n_positions
        lowerCAmelCase__ = n_embd
        lowerCAmelCase__ = n_layer
        lowerCAmelCase__ = n_head
        lowerCAmelCase__ = afn
        lowerCAmelCase__ = resid_pdrop
        lowerCAmelCase__ = embd_pdrop
        lowerCAmelCase__ = attn_pdrop
        lowerCAmelCase__ = layer_norm_epsilon
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = summary_type
        lowerCAmelCase__ = summary_use_proj
        lowerCAmelCase__ = summary_activation
        lowerCAmelCase__ = summary_first_dropout
        lowerCAmelCase__ = summary_proj_to_labels
        super().__init__(**_A )
| 61 |
'''simple docstring'''
import math
def a__ ( ) -> None:
    """Interactive driver for the transposition cipher: read a message, a key
    and a mode, then print the encrypted/decrypted output.

    NOTE(review): obfuscation damage — each input is bound to a throwaway
    local while later lines read ``mode``/``text``, ``lowerCAmelCase__`` is
    undefined, and ``encrypt_message``/``decrypt_message`` do not exist (all
    functions in this file are named ``a__``).  Raises NameError as written.
    """
    UpperCAmelCase__ : List[str] = input('''Enter message: ''' )
    UpperCAmelCase__ : Any = int(input(F"""Enter key [2-{len(lowerCAmelCase__ ) - 1}]: """ ) )
    UpperCAmelCase__ : List[str] = input('''Encryption/Decryption [e/d]: ''' )
    if mode.lower().startswith('''e''' ):
        UpperCAmelCase__ : Dict = encrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
    elif mode.lower().startswith('''d''' ):
        UpperCAmelCase__ : Optional[int] = decrypt_message(lowerCAmelCase__ , lowerCAmelCase__ )
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(F"""Output:\n{text + "|"}""" )
def a__ ( key , message ) -> str:
    """Columnar-transposition encrypt: column ``c`` of the output collects
    ``message[c::key]``.

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError) while the body read ``key``/``message``; fixed here.
    """
    cipher_text = [''''''] * key
    for col in range(key ):
        pointer = col
        # Walk down this column: every ``key``-th character of the message.
        while pointer < len(message ):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text )
def a__ ( key , message ) -> str:
    """Invert the columnar-transposition cipher produced with the same ``key``.

    NOTE(review): the original declared both parameters with the same name
    (a SyntaxError) while the body read ``key``/``message``; fixed here.
    """
    # The ciphertext grid has ``key`` rows; the last column may be short.
    num_cols = math.ceil(len(message ) / key )
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message )
    plain_text = [''''''] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        # Wrap to the next row at the last column, or one column earlier once
        # we reach the rows whose final cell is a shaded (unused) box.
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): ``main`` is undefined — every function in this file is
    # named ``a__`` — so this call raises NameError as written.
    main()
| 75 | 0 |
'''simple docstring'''
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# NOTE(review): the name ``snake_case_`` is rebound three times below, so only
# the last value (the uppercase alphabet) survives; presumably these were
# meant to be three distinct constants (frequency table, ETAOIN, LETTERS)
# read by the functions further down.
# English single-letter frequencies, in percent.
snake_case_ = {
    'E': 1_2.7_0,
    'T': 9.0_6,
    'A': 8.1_7,
    'O': 7.5_1,
    'I': 6.9_7,
    'N': 6.7_5,
    'S': 6.3_3,
    'H': 6.0_9,
    'R': 5.9_9,
    'D': 4.2_5,
    'L': 4.0_3,
    'C': 2.7_8,
    'U': 2.7_6,
    'M': 2.4_1,
    'W': 2.3_6,
    'F': 2.2_3,
    'G': 2.0_2,
    'Y': 1.9_7,
    'P': 1.9_3,
    'B': 1.2_9,
    'V': 0.9_8,
    'K': 0.7_7,
    'J': 0.1_5,
    'X': 0.1_5,
    'Q': 0.1_0,
    'Z': 0.0_7,
}
# Letters ordered from most to least frequent in English.
snake_case_ = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
# Plain uppercase alphabet.
snake_case_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
    """Count occurrences of each uppercase A-Z letter in the message
    (case-insensitive); non-letters are ignored.

    NOTE(review): the original read the undefined names ``message``,
    ``letter_count`` and ``LETTERS``; fixed here — membership in the count
    dict (keyed by A-Z) is equivalent to membership in the alphabet.
    """
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in SCREAMING_SNAKE_CASE_.upper():
        if letter in letter_count:
            letter_count[letter] += 1
    return letter_count
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ ):
    """Sort-key helper: return the first element of the given sequence/pair.

    NOTE(review): the original returned the undefined ``x[0]``; fixed to use
    the actual parameter.
    """
    return SCREAMING_SNAKE_CASE_[0]
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : str ) -> str:
    """Return the message's letters ordered from most to least frequent,
    breaking ties by (reverse) ETAOIN position.

    NOTE(review): obfuscation damage — results are bound to throwaway locals
    while later lines read ``letter_to_freq``/``freq_to_letter``/
    ``freq_to_letter_str``/``freq_pairs``, and ``get_letter_count``,
    ``LETTERS``, ``ETAOIN`` and ``lowerCAmelCase__`` are all undefined (the
    sibling helpers share this function's name).  Raises NameError as written.
    """
    SCREAMING_SNAKE_CASE_ : Optional[int] = get_letter_count(lowerCAmelCase__ )
    SCREAMING_SNAKE_CASE_ : dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(lowerCAmelCase__ )
    SCREAMING_SNAKE_CASE_ : dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=lowerCAmelCase__ )
        SCREAMING_SNAKE_CASE_ : Tuple = ''''''.join(freq_to_letter[freq] )
    SCREAMING_SNAKE_CASE_ : List[Any] = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=lowerCAmelCase__ , reverse=lowerCAmelCase__ )
    SCREAMING_SNAKE_CASE_ : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(lowerCAmelCase__ )
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> int:
    """Score (0-12) how English-like the message's letter frequencies are:
    one point per shared letter in the top-6 and bottom-6 of ETAOIN.

    NOTE(review): ``get_frequency_order``, ``ETAOIN``, ``freq_order``,
    ``match_score`` and ``lowerCAmelCase__`` are all undefined in this
    obfuscated file; raises NameError as written.
    """
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_frequency_order(lowerCAmelCase__ )
    SCREAMING_SNAKE_CASE_ : Tuple = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 421 |
'''simple docstring'''
class lowerCamelCase_ :
    """A named value used as a min-heap entry; nodes compare by ``val``.

    NOTE(review): the original ``__init__`` declared both parameters as ``_A``
    (a SyntaxError) while the bodies read ``name``/``val``/``other``; fixed.
    """

    def __init__( self , name , val ):
        self.name = name
        self.val = val

    def __str__( self ):
        return f"""{self.__class__.__name__}({self.name}, {self.val})"""

    def __lt__( self , other ):
        # Ordering used by the min-heap below: smaller ``val`` first.
        return self.val < other.val
class MinHeap:
    """Array-backed min-heap of ``Node``-like objects.

    Tracks two side tables: ``idx_of_element`` (node -> current heap index)
    so ``decrease_key`` can find a node in O(1), and ``heap_dict``
    (node.name -> node.val) backing ``__getitem__``.

    Fixes: the mangled original gave every method the same name
    (``lowercase_``, so later defs shadowed earlier ones), lost most local
    bindings, and passed the whole array where an index was expected in
    ``build_heap``.  Reconstructed from the internal calls the bodies still
    make (``get_parent_idx``, ``sift_down``, ``sift_up``, ``decrease_key``,
    ...).  NOTE(review): ``peek``/``remove``/``insert``/``is_empty`` are not
    referenced by visible code; names taken from the standard reference
    implementation — confirm against callers elsewhere.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        # Lookup by node *name*, not by heap position.
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify *array* in place and register every node's index and value."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        # Sift down from the last internal node back to the root.
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Restore the heap property below *idx*, keeping idx_of_element in sync."""
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)
            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r
            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        """Bubble the node at *idx* up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum node."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum node, then re-heapify."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        """Append *node* and sift it up into place."""
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        """Lower *node*'s value to *new_value* (must be strictly smaller)."""
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"  # fixed typo "less that"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# Demo: build a min-heap of named nodes and exercise decrease_key.
# Fix: the mangled original assigned all five nodes to the same name and then
# referenced the undefined r, b, a, x, e and my_min_heap.
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 75 | 0 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase ):
    """Fast smoke test for the ScoreSdeVe diffusion pipeline.

    NOTE(review): this file went through an identifier-mangling transform.
    Local assignment targets were rewritten to ``A_`` and several argument
    references to ``_A``, so names read later (``model``, ``sde_ve``,
    ``image`` ...) are undefined as written — the original local bindings
    must be restored before this test can run.
    """

    @property
    def _a ( self : List[str] ):
        """Build a small seeded UNet2D to act as the dummy unconditional denoiser."""
        torch.manual_seed(0 )
        A_ : Tuple = UNetaDModel(
            block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=3 ,out_channels=3 ,down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") ,up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") ,)
        # NOTE(review): `model` was originally bound by the assignment above
        # (target mangled to `A_`).
        return model

    def _a ( self : List[str] ):
        """Run the pipeline for 2 steps; check the output slice and that the
        dict-return and tuple-return paths produce the same image."""
        A_ : Any = self.dummy_uncond_unet
        A_ : str = ScoreSdeVeScheduler()
        # NOTE(review): the `_A` arguments below are mangling placeholders for
        # the unet/scheduler/device locals created above.
        A_ : Tuple = ScoreSdeVePipeline(unet=_A ,scheduler=_A )
        sde_ve.to(_A )
        sde_ve.set_progress_bar_config(disable=_A )
        A_ : int = torch.manual_seed(0 )
        A_ : Any = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_A ).images
        A_ : Tuple = torch.manual_seed(0 )
        A_ : Dict = sde_ve(num_inference_steps=2 ,output_type="""numpy""" ,generator=_A ,return_dict=_A )[
            0
        ]
        A_ : Optional[Any] = image[0, -3:, -3:, -1]
        A_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        A_ : Union[str, Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        # Tolerance 1e-2: pixel values after only 2 SDE steps are noisy.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
    """Slow integration test: full ScoreSdeVe pipeline with the pretrained
    google/ncsnpp-church-256 checkpoint.

    NOTE(review): identifier mangling rewrote local assignment targets to
    ``A_`` and argument references to ``_A``; names like ``sde_ve`` and
    ``image`` are undefined as written and must be re-bound.
    """

    def _a ( self : Optional[int] ):
        """Generate one 256x256 image in 10 inference steps and compare a
        corner slice against the recorded reference values."""
        A_ : Optional[int] = '''google/ncsnpp-church-256'''
        A_ : Dict = UNetaDModel.from_pretrained(_A )
        A_ : Optional[Any] = ScoreSdeVeScheduler.from_pretrained(_A )
        A_ : int = ScoreSdeVePipeline(unet=_A ,scheduler=_A )
        sde_ve.to(_A )
        sde_ve.set_progress_bar_config(disable=_A )
        A_ : Any = torch.manual_seed(0 )
        A_ : int = sde_ve(num_inference_steps=10 ,output_type="""numpy""" ,generator=_A ).images
        A_ : Any = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        A_ : List[str] = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 665 |
'''simple docstring'''
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# Fix: the mangled original assigned every constant to the same name
# (``UpperCamelCase__``), while the code below already references the real
# names (PATH_TO_TRANSFORMERS on the next line, CONFIG_MAPPING,
# _re_checkpoint, CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK
# in the functions).  Restore those names.
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are allowed to have no checkpoint in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches
    ``https://huggingface.co/<name>``, or None if no valid one is found.

    Fixes: the mangled original bound every local to ``UpperCAmelCase__`` and
    then read the undefined names ``checkpoints``/``checkpoint``; the def was
    also renamed away from the name its caller uses.
    """
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints() -> None:
    """Raise ValueError listing every non-deprecated, non-ignored config class
    whose docstring contains no valid checkpoint link.

    Fixes: the mangled original duplicated the def name ``a__`` (shadowing the
    helper) while the ``__main__`` guard called this real name, and its locals
    were all bound to ``UpperCAmelCase__``.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 75 | 0 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence for the observations
    (Viterbi algorithm).

    Fixes: every def in the mangled original was named
    ``SCREAMING_SNAKE_CASE_`` with all parameters sharing one name (a
    SyntaxError), while the bodies already called the helpers by their real
    ``_validate*`` names; names restored from those call sites.

    >>> viterbi(
    ...     ["normal", "cold", "dizzy"],
    ...     ["Healthy", "Fever"],
    ...     {"Healthy": 0.6, "Fever": 0.4},
    ...     {"Healthy": {"Healthy": 0.7, "Fever": 0.3},
    ...      "Fever": {"Healthy": 0.4, "Fever": 0.6}},
    ...     {"Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    ...      "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}},
    ... )
    ['Healthy', 'Healthy', 'Fever']
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()

    return result


def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Validate every viterbi argument, raising ValueError on bad input."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Reject any empty/falsy argument."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space, states_space) -> None:
    """Both spaces must be lists of strings."""
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object, var_name) -> None:
    """_object must be a list whose items are all strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Initial probabilities map to floats; the other two are nested dicts."""
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object, var_name) -> None:
    """_object must be a dict of dicts of floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object, var_name, value_type, nested=False) -> None:
    """_object must be a dict with string keys and value_type values."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 443 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase_(metaclass=DummyObject):
    """Dummy placeholder object that raises a helpful ImportError at use time
    when the ``torch``/``torchsde`` backends are unavailable.

    Fixes: the mangled original used the undefined metaclass ``__a``
    (``DummyObject`` is imported at the top of this file) and named all
    classmethods identically.  NOTE(review): the original public class and
    classmethod names were lost to mangling; ``from_config``/``from_pretrained``
    follow the diffusers dummy-object convention — confirm against the real
    scheduler class this stub stands in for.
    """

    # Backends checked by the DummyObject metaclass.
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 75 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries from *state_dict* in place.

    Missing keys are ignored (``pop`` with a default).  Fix: the mangled
    original named the def ``a`` while the callers below use
    ``remove_ignore_keys_``, and its locals were unbound.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build a bias-free ``nn.Linear`` sharing *emb*'s weight data (the usual
    tied LM head construction).

    Fix: the mangled original referenced the undefined ``emb`` instead of its
    parameter and left every local unbound.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Return a new dict with fairseq NLLB-MoE parameter names mapped to the
    Hugging Face NllbMoe naming scheme.

    When *expert_idx* is given, ``moe_layer.experts.0`` becomes
    ``ffn.experts.expert_<idx>``.  Fix: the mangled original named the def
    ``a`` (callers use ``rename_fairseq_keys``) and left ``key``/``new_dict``
    unbound.
    """
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: `"fc2" and ...` binds as `("fc2") and (...)`; the truthy literal
        # makes the condition just `"experts" not in key`. Preserved as-is since
        # the stray `.fc2.`/`.fc1.` replace is a no-op on non-matching keys.
        if "fc2" and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name=WEIGHTS_NAME):
    """Convert per-expert fairseq checkpoints into HF weight shards plus an index.

    Loads each ``<path>-rank-<i>.pt`` expert file and the ``<path>-shared.pt``
    weights, renames keys, writes one ``.bin`` shard per source file into
    *dump_path*, and returns ``(metadata, index)`` (or
    ``({weights_name: keys}, None)`` when only shared weights exist).

    Fix: the mangled original named the def ``a`` (the ``__main__`` block calls
    ``shard_on_the_fly``) and lost every local binding.  NOTE(review): *dtype*
    is accepted but unused here, matching the visible call signature.
    """
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # Tie the shared embedding to the decoder token embedding.
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    # Fix: the mangled original assigned everything to SCREAMING_SNAKE_CASE and
    # called the undefined shard_on_the_fly result names; bindings restored.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
| 99 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# NOTE(review): the original names of this constant and the class below were
# lost to mangling (both module constants were assigned to one shadowing
# name); the map/class follow the transformers CTRL config layout.
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class lowerCamelCase_(PretrainedConfig):
    """Configuration for the CTRL model.

    Fixes: the mangled original inherited from the undefined ``__a`` (the file
    imports ``PretrainedConfig``), declared every ``__init__`` parameter with
    the same name ``_A`` (a SyntaxError), and assigned all three class
    attributes to one name.  Parameter defaults and assignment order are
    preserved from the original body.
    """

    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246_534,
        n_positions=256,
        n_embd=1_280,
        dff=8_192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff  # inner feed-forward dimension
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 75 | 0 |
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return the binomial pmf: P(X = successes) in *trials* Bernoulli trials
    with success probability *prob*.

    Fixes: the mangled original declared all three parameters with the same
    name (a SyntaxError) while its body and the ``__main__`` block below
    already used the real names.

    >>> binomial_distribution(2, 4, 0.75)
    0.2109375
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")  # fixed typo "trails"
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 211 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
    literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
    [
        {
            'framework': 'pytorch',
            'script': 'run_glue.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'pytorch',
            'script': 'run_ddp.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
        },
        {
            'framework': 'tensorflow',
            'script': 'run_tf_dist.py',
            'model_name_or_path': 'distilbert-base-cased',
            'instance_type': 'ml.p3.16xlarge',
            'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
        },
    ] )
class lowerCamelCase_ ( unittest.TestCase ):
    """Multi-node distributed SageMaker training integration tests, run only
    when TEST_SAGEMAKER=True; parameterized over framework/script/instance.

    NOTE(review): identifier mangling rewrote local assignment targets to
    ``UpperCAmelCase__`` and some argument references to ``_A``, so names
    read later (``estimator``, ``train_runtime``, ``job_name`` ...) are
    undefined as written and must be re-bound before these tests can run.
    """

    def lowercase_ ( self : List[str] ):
        """Copy the example training script into the test path (PyTorch only)."""
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
        assert hasattr(self , '''env''' )

    def lowercase_ ( self : List[Any] , _A : Optional[Any] ):
        """Create a HuggingFace SageMaker estimator for the given instance count."""
        UpperCAmelCase__ : List[Any] = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
        # distributed data settings
        UpperCAmelCase__ : int = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None

        # creates estimator
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=_A , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=_A , py_version='''py36''' , )

    def lowercase_ ( self : Optional[int] , _A : Any ):
        """Export a training job's CloudWatch metrics to CSV in the test path."""
        TrainingJobAnalytics(_A ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )

    @parameterized.expand([(2,)] )
    def lowercase_ ( self : Optional[int] , _A : Optional[int] ):
        """Run a 2-node training job and assert runtime/accuracy/loss KPIs."""
        UpperCAmelCase__ : Optional[Any] = self.create_estimator(_A )

        # run training
        estimator.fit()

        # result dataframe
        UpperCAmelCase__ : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()

        # extract kpis
        UpperCAmelCase__ : Dict = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        UpperCAmelCase__ : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        UpperCAmelCase__ : Any = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 999_999 )
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
        assert all(t <= self.results['''eval_loss'''] for t in eval_loss )

        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
            json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 75 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
# Hard version pins: the conversion below relies on gluonnlp 0.8.3 / mxnet 1.5.0 APIs.
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
    raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
    raise Exception('''requires mxnet == 1.5.0''')

logging.set_verbosity_info()
# NOTE(review): both module constants below were mangled to the same name, so
# the second assignment shadows the first (originally a logger and a sample
# sentence used to sanity-check the converted model) — restore distinct names.
_UpperCamelCase = logging.get_logger(__name__)

_UpperCamelCase = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def lowerCAmelCase__( lowercase : str , lowercase : List[Any] ) -> List[str]:
__snake_case : Optional[Any] = {
'''attention_cell''': '''multi_head''',
'''num_layers''': 4,
'''units''': 1024,
'''hidden_size''': 768,
'''max_length''': 512,
'''num_heads''': 8,
'''scaled''': True,
'''dropout''': 0.1,
'''use_residual''': True,
'''embed_size''': 1024,
'''embed_dropout''': 0.1,
'''word_embed''': None,
'''layer_norm_eps''': 1E-5,
'''token_type_vocab_size''': 2,
}
__snake_case : int = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__snake_case : Tuple = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCAmelCase__ , output_all_encodings=lowerCAmelCase__ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCAmelCase__ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__snake_case : Union[str, Any] = '''openwebtext_ccnews_stories_books_cased'''
# Specify download folder to Gluonnlp's vocab
__snake_case : Tuple = os.path.join(get_home_dir() , "models" )
__snake_case : Tuple = _load_vocab(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , cls=lowerCAmelCase__ )
__snake_case : List[str] = nlp.model.BERTModel(
lowerCAmelCase__ , len(lowerCAmelCase__ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCAmelCase__ , use_token_type_embed=lowerCAmelCase__ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCAmelCase__ , use_decoder=lowerCAmelCase__ , )
original_bort.load_parameters(lowerCAmelCase__ , cast_dtype=lowerCAmelCase__ , ignore_extra=lowerCAmelCase__ )
__snake_case : Optional[Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
__snake_case : List[str] = {
'''architectures''': ['''BertForMaskedLM'''],
'''attention_probs_dropout_prob''': predefined_args['''dropout'''],
'''hidden_act''': '''gelu''',
'''hidden_dropout_prob''': predefined_args['''dropout'''],
'''hidden_size''': predefined_args['''embed_size'''],
'''initializer_range''': 0.0_2,
'''intermediate_size''': predefined_args['''hidden_size'''],
'''layer_norm_eps''': predefined_args['''layer_norm_eps'''],
'''max_position_embeddings''': predefined_args['''max_length'''],
'''model_type''': '''bort''',
'''num_attention_heads''': predefined_args['''num_heads'''],
'''num_hidden_layers''': predefined_args['''num_layers'''],
'''pad_token_id''': 1, # 2 = BERT, 1 = RoBERTa
'''type_vocab_size''': 1, # 2 = BERT, 1 = RoBERTa
'''vocab_size''': len(lowerCAmelCase__ ),
}
__snake_case : Tuple = BertConfig.from_dict(lowerCAmelCase__ )
__snake_case : Optional[Any] = BertForMaskedLM(lowerCAmelCase__ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowercase : Tuple ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowercase : int , lowercase : int ):
__snake_case : Optional[Any] = hf_param.shape
__snake_case : Optional[int] = to_torch(params[gluon_param] )
__snake_case : List[str] = gluon_param.shape
assert (
shape_hf == shape_gluon
), f"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
__snake_case : Dict = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
__snake_case : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
__snake_case : Any = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
__snake_case : Optional[int] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__snake_case : Any = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__snake_case : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__snake_case : BertSelfAttention = layer.attention.self
__snake_case : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
__snake_case : Optional[Any] = check_and_map_params(
self_attn.key.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
__snake_case : Optional[Any] = check_and_map_params(
self_attn.query.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
__snake_case : Dict = check_and_map_params(
self_attn.query.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
__snake_case : Any = check_and_map_params(
self_attn.value.bias.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
__snake_case : Optional[Any] = check_and_map_params(
self_attn.value.weight.data , f"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
__snake_case : BertSelfOutput = layer.attention.output
__snake_case : List[Any] = check_and_map_params(
self_output.dense.bias , f"""encoder.transformer_cells.{i}.proj.bias""" )
__snake_case : Tuple = check_and_map_params(
self_output.dense.weight , f"""encoder.transformer_cells.{i}.proj.weight""" )
__snake_case : Optional[int] = check_and_map_params(
self_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.layer_norm.beta""" )
__snake_case : Optional[Any] = check_and_map_params(
self_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
__snake_case : BertIntermediate = layer.intermediate
__snake_case : List[Any] = check_and_map_params(
intermediate.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
__snake_case : List[str] = check_and_map_params(
intermediate.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
__snake_case : BertOutput = layer.output
__snake_case : str = check_and_map_params(
bert_output.dense.bias , f"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
__snake_case : Tuple = check_and_map_params(
bert_output.dense.weight , f"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
__snake_case : Tuple = check_and_map_params(
bert_output.LayerNorm.bias , f"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
__snake_case : Any = check_and_map_params(
bert_output.LayerNorm.weight , f"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__snake_case : Optional[int] = RobertaTokenizer.from_pretrained("roberta-base" )
__snake_case : Tuple = tokenizer.encode_plus(lowerCAmelCase__ )['''input_ids''']
# Get gluon output
__snake_case : Union[str, Any] = mx.nd.array([input_ids] )
__snake_case : Optional[int] = original_bort(inputs=lowerCAmelCase__ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCAmelCase__ )
__snake_case : List[Any] = BertModel.from_pretrained(lowerCAmelCase__ )
hf_bort_model.eval()
__snake_case : Union[str, Any] = tokenizer.encode_plus(lowerCAmelCase__ , return_tensors="pt" )
__snake_case : List[str] = hf_bort_model(**lowerCAmelCase__ )[0]
__snake_case : List[str] = output_gluon[0].asnumpy()
__snake_case : Any = output_hf[0].detach().numpy()
__snake_case : Any = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__snake_case : List[Any] = np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , lowerCAmelCase__ )
if __name__ == "__main__":
    # Build the CLI: both arguments are mandatory paths.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 243 |
"""Accuracy metric for the Mathematics Aptitude Test of Heuristics (MATH) dataset."""

import math_equivalence  # From: git+https://github.com/hendrycks/math.git

import datasets


_CITATION = """\
@article{hendrycksmath2021,
  title={Measuring Mathematical Problem Solving With the MATH Dataset},
  author={Dan Hendrycks
    and Collin Burns
    and Saurav Kadavath
    and Akul Arora
    and Steven Basart
    and Eric Tang
    and Dawn Song
    and Jacob Steinhardt},
  journal={arXiv preprint arXiv:2103.03874},
  year={2021}
}
"""

_DESCRIPTION = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.
"""

_KWARGS_DESCRIPTION = r"""
Calculates accuracy after canonicalizing inputs.
Args:
    predictions: list of predictions to score. Each prediction
        is a string that contains natural language and LaTex.
    references: list of reference for each prediction. Each
        reference is a string that contains natural language
        and LaTex.
Returns:
    accuracy: accuracy after canonicalizing inputs
        (e.g., converting "1/2" to "\\frac{1}{2}")
Examples:
    >>> metric = datasets.load_metric("competition_math")
    >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])
    >>> print(results)
    {'accuracy': 1.0}
"""


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowerCamelCase_(datasets.Metric):
    """Exact-match accuracy on MATH, after LaTeX canonicalization of both sides."""

    def _info(self):
        # Hook used by `datasets.Metric` to describe the metric's I/O schema.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        """Return ``{"accuracy": fraction of predictions equivalent to their reference}``."""
        n_correct = 0.0
        for pred, ref in zip(predictions, references):
            # is_equiv canonicalizes both strings (e.g. "1/2" -> "\frac{1}{2}") before comparing.
            n_correct += 1.0 if math_equivalence.is_equiv(pred, ref) else 0.0
        accuracy = n_correct / len(predictions)

        return {
            "accuracy": accuracy,
        }
| 75 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__A = logging.get_logger(__name__)
__A = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all BART models at https://huggingface.co/models?filter=bart
__A = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json''',
},
}
__A = {
'''facebook/bart-base''': 1024,
'''facebook/bart-large''': 1024,
'''facebook/bart-large-mnli''': 1024,
'''facebook/bart-large-cnn''': 1024,
'''facebook/bart-large-xsum''': 1024,
'''yjernite/bart_eli5''': 1024,
}
class _snake_case ( __a ):
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = ["input_ids", "attention_mask"]
snake_case__ = BartTokenizer
def __init__( self : Tuple , UpperCAmelCase : List[str]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Tuple="replace" , UpperCAmelCase : Optional[Any]="<s>" , UpperCAmelCase : int="</s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : Optional[int]="<unk>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : str="<mask>" , UpperCAmelCase : Dict=False , UpperCAmelCase : int=True , **UpperCAmelCase : Optional[Any] , ):
super().__init__(
_A , _A , tokenizer_file=_A , errors=_A , bos_token=_A , eos_token=_A , sep_token=_A , cls_token=_A , unk_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , trim_offsets=_A , **_A , )
__lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , _A ) != add_prefix_space:
__lowerCamelCase : str = getattr(_A , pre_tok_state.pop("type" ) )
__lowerCamelCase : Any = add_prefix_space
__lowerCamelCase : str = pre_tok_class(**_A )
__lowerCamelCase : Dict = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCamelCase : Optional[Any] = '''post_processor'''
__lowerCamelCase : List[Any] = getattr(self.backend_tokenizer , _A , _A )
if tokenizer_component_instance:
__lowerCamelCase : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCamelCase : Union[str, Any] = tuple(state["sep"] )
if "cls" in state:
__lowerCamelCase : Union[str, Any] = tuple(state["cls"] )
__lowerCamelCase : Dict = False
if state.get("add_prefix_space" , _A ) != add_prefix_space:
__lowerCamelCase : Union[str, Any] = add_prefix_space
__lowerCamelCase : Dict = True
if state.get("trim_offsets" , _A ) != trim_offsets:
__lowerCamelCase : List[Any] = trim_offsets
__lowerCamelCase : List[Any] = True
if changes_to_apply:
__lowerCamelCase : Dict = getattr(_A , state.pop("type" ) )
__lowerCamelCase : Union[str, Any] = component_class(**_A )
setattr(self.backend_tokenizer , _A , _A )
@property
def lowerCamelCase__ ( self : Dict ):
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase__ ( self : Dict , UpperCAmelCase : List[Any] ):
__lowerCamelCase : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else value
__lowerCamelCase : str = value
def lowerCamelCase__ ( self : Optional[int] , *UpperCAmelCase : List[str] , **UpperCAmelCase : Dict ):
__lowerCamelCase : Any = kwargs.get("is_split_into_words" , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*_A , **_A )
def lowerCamelCase__ ( self : Optional[Any] , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : List[Any] ):
__lowerCamelCase : Optional[Any] = kwargs.get("is_split_into_words" , _A )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*_A , **_A )
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ):
__lowerCamelCase : str = self._tokenizer.model.save(_A , name=_A )
return tuple(_A )
def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int]=None ):
__lowerCamelCase : Any = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ):
__lowerCamelCase : Optional[int] = [self.sep_token_id]
__lowerCamelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] | 646 |
"""Fast BART tokenizer module (Rust-backed), mirroring the slow ``BartTokenizer``."""

import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}


class lowerCamelCase_(PreTrainedTokenizerFast):
    """Fast BART tokenizer backed by HuggingFace *tokenizers* (byte-level BPE)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the pre-tokenizer when the stored add_prefix_space flag
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        """The mask token; logs an error and returns ``None`` if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word (includes the preceding space).
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the backend model's files to *save_directory* and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        """``<s> A </s>`` (single) or ``<s> A </s> </s> B </s>`` (pair)."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_a is None:
            return output

        return output + [self.eos_token_id] + token_ids_a_a + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """BART ignores token type ids; return an all-zero mask of matching length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]
| 75 | 0 |
"""Diffie-Hellman key exchange over the RFC 3526 MODP groups."""

from binascii import hexlify
from hashlib import sha256
from os import urandom

# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
    # 1536-bit
    5: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 2048-bit
    14: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 3072-bit
    15: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 4096-bit
    16: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
            + "FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 6144-bit
    17: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
            + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
            + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
            + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
            + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
            + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
            + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
            + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
            + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
            + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
            + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
            + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
            + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
            + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
            + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
            + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
            + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
            + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
            + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
            + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
            + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
            + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
            + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
            + "6DCC4024FFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
    # 8192-bit
    18: {
        "prime": int(
            "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
            + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
            + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
            + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
            + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
            + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
            + "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
            + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
            + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
            + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
            + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
            + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
            + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
            + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
            + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
            + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
            + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
            + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
            + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
            + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
            + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
            + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
            + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
            + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
            + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
            + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
            + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
            + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
            + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
            + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
            + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
            + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
            + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
            + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
            + "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
            + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
            + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
            + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
            + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
            + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
            + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
            + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
            + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
            base=16,
        ),
        "generator": 2,
    },
}


class DiffieHellman:
    """One party of a Diffie-Hellman key exchange.

    A fresh 256-bit private key is drawn from ``os.urandom`` at construction;
    the shared secret is returned as the SHA-256 hex digest of the raw shared
    value.
    """

    # Current minimum recommendation is 2048 bit (group 14).
    def __init__(self, group: int = 14) -> None:
        if group not in primes:
            raise ValueError("Unsupported Group")
        self.prime = primes[group]["prime"]
        self.generator = primes[group]["generator"]

        self.__private_key = int(hexlify(urandom(32)), base=16)

    def get_private_key(self) -> str:
        """Return the private key as a hex string (no '0x' prefix)."""
        return hex(self.__private_key)[2:]

    def generate_public_key(self) -> str:
        """Return g^private mod p as a hex string (no '0x' prefix)."""
        public_key = pow(self.generator, self.__private_key, self.prime)
        return hex(public_key)[2:]

    def is_valid_public_key(self, key: int) -> bool:
        # Check that the peer's key is in range and lies in the prime-order
        # subgroup (Legendre symbol == 1), per NIST SP800-56.
        return (
            2 <= key <= self.prime - 2
            and pow(key, (self.prime - 1) // 2, self.prime) == 1
        )

    def generate_shared_key(self, other_key_str: str) -> str:
        """Derive the shared secret from the peer's hex-encoded public key.

        Raises ValueError if the peer's key fails validation.
        """
        other_key = int(other_key_str, base=16)
        if not self.is_valid_public_key(other_key):
            raise ValueError("Invalid public key")
        shared_key = pow(other_key, self.__private_key, self.prime)
        return sha256(str(shared_key).encode()).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key_str: int, prime: int) -> bool:
        # Same subgroup/range check as the instance method, for callers that
        # only hold raw numbers.
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str, (prime - 1) // 2, prime) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str, remote_public_key_str: str, group: int = 14) -> str:
        """Derive the shared secret from hex-encoded private and public keys."""
        local_private_key = int(local_private_key_str, base=16)
        remote_public_key = int(remote_public_key_str, base=16)
        prime = primes[group]["prime"]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key, prime):
            raise ValueError("Invalid public key")
        shared_key = pow(remote_public_key, local_private_key, prime)
        return sha256(str(shared_key).encode()).hexdigest()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle ``data`` in place by random pair swaps and return it.

    NOTE(review): despite the name this is a naive random-swap shuffle,
    not the textbook Fisher-Yates loop (which swaps index i with a random
    j <= i); either way the result is a permutation of the input.
    """
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 75 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure for TimeSformer, following the standard transformers
# module layout: configuration is always importable, modeling classes only
# when torch is available.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # modeling classes require torch, so only register them when available
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes <= n in order."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # Classic sieve on [2, sqrt(n)] to collect the base primes.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # Sieve each segment [low, high] using the base primes.
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # First multiple of `each` that falls inside [low, high].
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
if __name__ == "__main__":
    # Guarded so importing this module does not run the expensive demo.
    print(sieve(10**6))
| 75 | 0 |
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    """Return ``x`` unchanged if it is iterable, else duplicate it into a pair.

    Used to normalize image_size/patch_size config values that may be a
    single int or an (h, w) pair.
    """
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class UpperCamelCase__ :
"""Shared behaviour checks for FlaxVisionTextDualEncoder test subclasses.

NOTE(review): method bodies throughout this class reference `_A`, a name no
signature here defines, and several signatures repeat the same parameter
name -- the identifiers look machine-mangled, so this code cannot run as
written. Inferred intent is noted below and should be confirmed against the
upstream transformers test file.
"""
def a__ ( self : str , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] ):
'''Hook: presumably builds (vision_model, text_model); overridden by subclasses.'''
pass
def a__ ( self : Optional[int] ):
'''Hook: presumably returns (model, inputs) for the slow save/load test.'''
pass
def a__ ( self : Any ):
'''Hook: presumably prepares config and inputs; overridden by subclasses.'''
pass
def a__ ( self : List[Any] , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : float ):
'''Assert that max |a - b| is within the given tolerance.'''
__magic_name__ = np.abs((a - b) ).max()
self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def a__ ( self : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Any=None , **UpperCamelCase_ : Union[str, Any] ):
'''Build the dual-encoder from configs and check output embedding shapes.'''
__magic_name__ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__magic_name__ = FlaxVisionTextDualEncoderModel(_A )
__magic_name__ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def a__ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : str , UpperCamelCase_ : int=None , **UpperCamelCase_ : Union[str, Any] ):
'''Build from pretrained vision/text submodels and check output shapes.'''
__magic_name__ = self.get_vision_text_model(_A , _A )
__magic_name__ = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__magic_name__ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def a__ ( self : Dict , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=None , **UpperCamelCase_ : Any ):
'''Round-trip save_pretrained/from_pretrained and compare outputs.'''
__magic_name__ = self.get_vision_text_model(_A , _A )
__magic_name__ = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__magic_name__ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__magic_name__ = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
__magic_name__ = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
__magic_name__ = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__magic_name__ = after_output[0]
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1e-3 )
def a__ ( self : Tuple , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] ):
'''Check attention tensor shapes for both the vision and text towers.'''
__magic_name__ = self.get_vision_text_model(_A , _A )
__magic_name__ = {'''vision_model''': vision_model, '''text_model''': text_model}
__magic_name__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__magic_name__ = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__magic_name__ = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ = to_atuple(vision_model.config.image_size )
__magic_name__ = to_atuple(vision_model.config.patch_size )
__magic_name__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def a__ ( self : List[str] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple ):
'''Cross-check Flax vs PyTorch outputs, including save/load in both directions.'''
pt_model.to(_A )
pt_model.eval()
# prepare inputs
__magic_name__ = inputs_dict
__magic_name__ = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
__magic_name__ = pt_model(**_A ).to_tuple()
__magic_name__ = fx_model(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_A )
__magic_name__ = FlaxVisionTextDualEncoderModel.from_pretrained(_A , from_pt=_A )
__magic_name__ = fx_model_loaded(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(_A , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_A )
__magic_name__ = VisionTextDualEncoderModel.from_pretrained(_A , from_flax=_A )
pt_model_loaded.to(_A )
pt_model_loaded.eval()
with torch.no_grad():
__magic_name__ = pt_model_loaded(**_A ).to_tuple()
self.assertEqual(len(_A ) , len(_A ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(_A , pt_output_loaded.numpy() , 4e-2 )
def a__ ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str] , UpperCamelCase_ : Union[str, Any] ):
'''Convert a PT state dict to Flax and check PT/Flax equivalence.'''
__magic_name__ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__magic_name__ = VisionTextDualEncoderModel(_A )
__magic_name__ = FlaxVisionTextDualEncoderModel(_A )
__magic_name__ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _A )
__magic_name__ = fx_state
self.check_pt_flax_equivalence(_A , _A , _A )
def a__ ( self : List[Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Dict ):
'''Load Flax weights into a PT model and check PT/Flax equivalence.'''
__magic_name__ = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__magic_name__ = VisionTextDualEncoderModel(_A )
__magic_name__ = FlaxVisionTextDualEncoderModel(_A )
__magic_name__ = load_flax_weights_in_pytorch_model(_A , fx_model.params )
self.check_pt_flax_equivalence(_A , _A , _A )
def a__ ( self : Union[str, Any] ):
'''Test: build from configs.'''
__magic_name__ = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def a__ ( self : Any ):
'''Test: build from pretrained submodels.'''
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def a__ ( self : str ):
'''Test: save/load round trip.'''
__magic_name__ = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def a__ ( self : Any ):
'''Test: attention output shapes.'''
__magic_name__ = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@is_pt_flax_cross_test
def a__ ( self : Union[str, Any] ):
'''Test: PT<->Flax equivalence in both conversion directions.'''
__magic_name__ = self.prepare_config_and_inputs()
__magic_name__ = config_inputs_dict.pop('vision_config' )
__magic_name__ = config_inputs_dict.pop('text_config' )
__magic_name__ = config_inputs_dict
self.check_equivalence_pt_to_flax(_A , _A , _A )
self.check_equivalence_flax_to_pt(_A , _A , _A )
@slow
def a__ ( self : List[str] ):
'''Slow test: pretrained model outputs survive a save/load round trip.'''
__magic_name__ = self.get_pretrained_model_and_inputs()
__magic_name__ = model_a(**_A )
__magic_name__ = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
__magic_name__ = FlaxVisionTextDualEncoderModel.from_pretrained(_A )
__magic_name__ = model_a(**_A )
__magic_name__ = after_outputs[0]
__magic_name__ = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1e-5 )
@require_flax
class UpperCamelCase__ ( __a , unittest.TestCase):
"""ViT + BERT concretization of the dual-encoder mixin.

NOTE(review): bodies reference `_A` (undefined here) and several signatures
repeat a parameter name -- machine-mangled identifiers; confirm against the
upstream transformers test before running.
"""
def a__ ( self : int ):
'''Build a tiny ViT+BERT dual encoder (from PT weights) plus random inputs.'''
__magic_name__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_A , text_from_pt=_A , )
__magic_name__ = 1_3
__magic_name__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def a__ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] ):
'''Instantiate the (vision, text) submodel pair from their configs.'''
__magic_name__ = FlaxViTModel(_A )
__magic_name__ = FlaxBertModel(_A )
return vision_model, text_model
def a__ ( self : Optional[int] ):
'''Prepare configs and inputs via the ViT and BERT model testers.'''
__magic_name__ = FlaxViTModelTester(self )
__magic_name__ = FlaxBertModelTester(self )
__magic_name__ = vit_model_tester.prepare_config_and_inputs()
__magic_name__ = bert_model_tester.prepare_config_and_inputs()
__magic_name__ = vision_config_and_inputs
__magic_name__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class UpperCamelCase__ ( __a , unittest.TestCase):
"""CLIP-vision + BERT concretization of the dual-encoder mixin.

NOTE(review): same machine-mangled identifiers (`_A`, duplicated parameter
names) as the sibling classes -- verify against upstream before running.
"""
def a__ ( self : Union[str, Any] ):
'''Build a tiny CLIP+BERT dual encoder (from PT weights) plus random inputs.'''
__magic_name__ = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=_A , text_from_pt=_A , )
__magic_name__ = 1_3
__magic_name__ = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
__magic_name__ = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
__magic_name__ = random_attention_mask([batch_size, 4] )
__magic_name__ = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
return model, inputs
def a__ ( self : int , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any] ):
'''Instantiate the (vision, text) submodel pair from their configs.'''
__magic_name__ = FlaxCLIPVisionModel(_A )
__magic_name__ = FlaxBertModel(_A )
return vision_model, text_model
def a__ ( self : Any ):
'''Prepare configs and inputs via the CLIP and BERT model testers.'''
__magic_name__ = FlaxCLIPVisionModelTester(self )
__magic_name__ = FlaxBertModelTester(self )
__magic_name__ = clip_model_tester.prepare_config_and_inputs()
__magic_name__ = bert_model_tester.prepare_config_and_inputs()
__magic_name__ = vision_config_and_inputs
__magic_name__ = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class UpperCamelCase__ ( unittest.TestCase):
"""Slow integration test for the pretrained clip-italian dual encoder.

NOTE(review): `_A` is undefined here (mangled identifiers); intent is a
standard image/text similarity check with pinned logits.
"""
@slow
def a__ ( self : Dict ):
'''Run the pretrained model on a COCO image and verify logit shapes/values.'''
__magic_name__ = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
__magic_name__ = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
__magic_name__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__magic_name__ = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=_A , padding=_A , return_tensors='np' )
__magic_name__ = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__magic_name__ = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , _A , atol=1e-3 ) )
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase_ ( __a , __a , __a , unittest.TestCase ):
"""Fast pipeline tests for StableDiffusionInpaintPipeline on tiny models.

NOTE(review): bodies reference `_A`, which no signature defines except in
get_dummy_inputs (where it is duplicated) -- machine-mangled identifiers;
confirm against the upstream diffusers test file before running.
"""
lowerCAmelCase__ = StableDiffusionInpaintPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
def lowercase_ ( self : Optional[int] ):
'''Build the tiny UNet/scheduler/VAE/text-encoder component dict.'''
torch.manual_seed(0 )
UpperCAmelCase__ : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase__ : int = PNDMScheduler(skip_prk_steps=_A )
torch.manual_seed(0 )
UpperCAmelCase__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='''gelu''' , projection_dim=512 , )
UpperCAmelCase__ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase__ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase__ : str = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def lowercase_ ( self : str , _A : Dict , _A : Any=0 ):
'''Build deterministic dummy inputs (init image, mask, generator, prompt).'''
UpperCAmelCase__ : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' ).resize((64, 64) )
UpperCAmelCase__ : int = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase__ : List[Any] = torch.manual_seed(_A )
else:
UpperCAmelCase__ : str = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase__ : Optional[int] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': init_image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self : Union[str, Any] ):
'''Run the tiny pipeline on CPU and compare an image slice to pinned values.'''
UpperCAmelCase__ : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase__ : Tuple = self.get_dummy_components()
UpperCAmelCase__ : str = StableDiffusionInpaintPipeline(**_A )
UpperCAmelCase__ : List[str] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase__ : Dict = self.get_dummy_inputs(_A )
UpperCAmelCase__ : Any = sd_pipe(**_A ).images
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : int = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self : Tuple ):
'''Delegate to the common batch-vs-single consistency check.'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
"""Slow GPU integration tests for stable-diffusion-2 inpainting.

NOTE(review): bodies reference `_A` (undefined; mangled identifiers) --
confirm against the upstream diffusers test file before running.
"""
def lowercase_ ( self : List[Any] ):
'''Free GPU memory between tests.'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ):
'''fp32 inpainting run compared against a reference numpy image.'''
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
UpperCAmelCase__ : Dict = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Optional[int] = StableDiffusionInpaintPipeline.from_pretrained(_A , safety_checker=_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : str = torch.manual_seed(0 )
UpperCAmelCase__ : str = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : int = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def lowercase_ ( self : Any ):
'''fp16 inpainting run compared against a reference numpy image.'''
UpperCAmelCase__ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
UpperCAmelCase__ : Tuple = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : Any = StableDiffusionInpaintPipeline.from_pretrained(
_A , torch_dtype=torch.floataa , safety_checker=_A , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing()
UpperCAmelCase__ : Tuple = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , output_type='''np''' , )
UpperCAmelCase__ : Tuple = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def lowercase_ ( self : Any ):
'''Check peak GPU memory stays under 2.65 GB with cpu offload + slicing.'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase__ : Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
UpperCAmelCase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
UpperCAmelCase__ : Optional[Any] = '''stabilityai/stable-diffusion-2-inpainting'''
UpperCAmelCase__ : str = PNDMScheduler.from_pretrained(_A , subfolder='''scheduler''' )
UpperCAmelCase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
_A , safety_checker=_A , scheduler=_A , torch_dtype=torch.floataa , )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase__ : Optional[int] = '''Face of a yellow cat, high resolution, sitting on a park bench'''
UpperCAmelCase__ : Tuple = torch.manual_seed(0 )
UpperCAmelCase__ : Any = pipe(
prompt=_A , image=_A , mask_image=_A , generator=_A , num_inference_steps=2 , output_type='''np''' , )
UpperCAmelCase__ : int = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.6_5 * 10**9
| 75 | 0 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
UpperCamelCase = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale a pixel box [x0, y0, x1, y1] into LayoutLM's 0-1000 space.

    box: absolute pixel coordinates; width/height: image dimensions.
    Returns the box with each coordinate mapped to int(1000 * coord / dim).
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config):
    """Run Tesseract OCR on *image* and return (words, normalized_boxes).

    Boxes are converted to (left, top, right, bottom) and scaled to the
    0-1000 space via normalize_box. Empty OCR tokens are dropped along
    with their coordinates.
    """
    # apply OCR on a PIL version of the input image
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set for O(1) lookups)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class __lowerCamelCase ( __a ):
"""LayoutLM-style image processor: resize/rescale/normalize plus optional OCR.

NOTE(review): the constructor's parameters are all named
SCREAMING_SNAKE_CASE__ (duplicated -- a SyntaxError as written) and the body
reads names like do_resize/rescale_value that no signature defines; the
identifiers are machine-mangled and must be restored before use.
"""
snake_case__ = ["pixel_values"]
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Dict[str, int] = None , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : float = 1 / 255 , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Union[float, Iterable[float]] = None , SCREAMING_SNAKE_CASE__ : Union[float, Iterable[float]] = None , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "" , **SCREAMING_SNAKE_CASE__ : Tuple , ) -> Dict:
# stores resize/rescale/normalize flags plus OCR settings on self
super().__init__(**_A )
lowerCAmelCase__ = size if size is not None else {'''height''': 224, '''width''': 224}
lowerCAmelCase__ = get_size_dict(_A )
lowerCAmelCase__ = do_resize
lowerCAmelCase__ = size
lowerCAmelCase__ = resample
lowerCAmelCase__ = do_rescale
lowerCAmelCase__ = rescale_value
lowerCAmelCase__ = do_normalize
lowerCAmelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowerCAmelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowerCAmelCase__ = apply_ocr
lowerCAmelCase__ = ocr_lang
lowerCAmelCase__ = tesseract_config
def a ( self : List[Any] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, int] , SCREAMING_SNAKE_CASE__ : PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : List[str] , ) -> int:
# Resize an image to size['height'] x size['width']; raises ValueError if
# the size dict lacks those keys. NOTE(review): `_A` is undefined here
# (mangled identifiers) -- parameter names need restoring.
lowerCAmelCase__ = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowerCAmelCase__ = (size['''height'''], size['''width'''])
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def a ( self : List[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[int, float] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : str , ) -> Any:
# Thin wrapper around image_transforms.rescale. NOTE(review): `_A` is
# undefined (mangled identifiers) -- parameter names need restoring.
return rescale(_A , scale=_A , data_format=_A , **_A )
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Union[float, Iterable[float]] , SCREAMING_SNAKE_CASE__ : Union[float, Iterable[float]] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE__ : Optional[Any] , ) -> Any:
# Thin wrapper around image_transforms.normalize. NOTE(review): `_A` is
# undefined (mangled identifiers) -- parameter names need restoring.
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def preprocess(
    self,
    images: ImageInput,
    do_resize: bool = None,
    size: Dict[str, int] = None,
    resample=None,
    do_rescale: bool = None,
    rescale_factor: float = None,
    do_normalize: bool = None,
    image_mean: Union[float, Iterable[float]] = None,
    image_std: Union[float, Iterable[float]] = None,
    apply_ocr: bool = None,
    ocr_lang: Optional[str] = None,
    tesseract_config: Optional[str] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: ChannelDimension = ChannelDimension.FIRST,
    **kwargs,
) -> BatchFeature:
    """
    Preprocess one image or a batch: optional resize → rescale → normalize,
    plus optional Tesseract OCR producing per-image words and boxes.

    Every flag/value defaults to the attribute set in `__init__` when None.

    Returns:
        BatchFeature with "pixel_values" and, when OCR ran, "words"/"boxes".

    Raises:
        ValueError: on invalid image types or missing required settings.
    """
    # Fall back to instance defaults for anything the caller did not override.
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size)
    resample = resample if resample is not None else self.resample
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
    ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
    tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True.")
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]

    # Tesseract OCR to get words + normalized bounding boxes (run on the
    # untransformed images so box coordinates match the originals).
    if apply_ocr:
        requires_backends(self, "pytesseract")
        words_batch = []
        boxes_batch = []
        for image in images:
            words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
            words_batch.append(words)
            boxes_batch.append(boxes)

    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]

    data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
    if apply_ocr:
        data["words"] = words_batch
        data["boxes"] = boxes_batch
    return data
| 61 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase__ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """
    Build the dict of inputs used by the Flax Blenderbot tests.

    Masks that are not supplied are derived from `config.pad_token_id`
    (attention masks) or set to all-ones (head masks).
    """
    # Restored from the mangled `a__` whose parameters all shared one name;
    # L17916 already calls this function by its real name.
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder mask is deliberately reused for the decoder
        # key here, matching the original return; head masks are not returned.
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and cache-consistency checks for
    the Flax model tests below (instantiated at L18033 by this name)."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        # Parameter names restored from the assignment targets the mangled
        # original lost; defaults preserved from the original signature.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Return a small random (config, inputs_dict) pair; every sequence
        ends with EOS (2), decoder inputs are the shifted encoder inputs."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding step-by-step with a KV cache must match full decoding."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same cache-consistency check, with an explicit padded decoder mask."""
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        # Pad the real mask with zeros out to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    """Light-weight forward-pass checks for the Flax Blenderbot LM head.

    Method names restored (the mangled original defined four methods all
    called `lowercase_`, so only the last one survived class creation).
    """

    vocab_size = 99

    def _get_config_and_data(self):
        # 13 sequences of length 7; every row ends with EOS (2), one row padded with 1.
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        # Logits must be (batch, seq_len, vocab_size).
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        # Encoder and decoder sequences of different lengths.
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        # Shifting right should consume exactly one pad token and start rows with BOS (2).
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float64).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float64).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    """Common Flax model-test suite for Blenderbot.

    Base classes restored from the imports at the top of this file (the
    mangled original inherited from an undefined `__a` placeholder).
    """

    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        # Jitted and non-jitted encoding must produce identically shaped outputs.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        # Same shape check for the decoder, fed with precomputed encoder outputs.
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        # NOTE(review): generate() returns a generation-output object; decoding
        # its `.sequences` — confirm against the installed transformers version.
        generated_txt = tokenizer.batch_decode(generated_ids.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 75 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return the frequency of every attainable total when rolling
    `dice_number` fair dice with `sides_number` faces each.

    Index ``i`` of the returned list holds the number of ordered outcomes
    whose faces sum to ``i`` (indices below `dice_number` stay 0).
    """
    # Assignment targets restored; the mangled original referenced
    # `max_face_number` etc. without ever binding them.
    max_face_number = sides_number
    max_total_value = max_face_number * dice_number
    totals_frequencies = [0] * (max_total_value + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total_value = sum(dice_numbers)
        totals_frequencies[total_value] += 1

    return totals_frequencies


def solution() -> float:
    """Project Euler 205: probability (rounded to 7 digits) that the total of
    Peter's nine 4-sided dice beats the total of Colin's six 6-sided dice."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9  # nine dice, minimum face value 1
    max_peter_total = 4 * 9
    min_colin_total = 6  # six dice, minimum face value 1
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Colin loses whenever his total is strictly below Peter's.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
# Script entry point: print the rounded win probability (Project Euler 205).
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 421 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class DummyBeamDataset(datasets.BeamBasedBuilder):
    """Minimal Beam-based builder producing flat {"content": str} examples.

    Renamed from the mangled `lowerCamelCase_` (the test class below already
    instantiates `DummyBeamDataset`); builder hooks restored to the names the
    `datasets` framework invokes.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"content": datasets.Value("string")}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_dummy_examples()})]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
class NestedBeamDataset(datasets.BeamBasedBuilder):
    """Beam-based builder with a nested {"a": {"b": [str]}} feature schema,
    used to check that nested features round-trip through Beam preparation."""

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager, pipeline):
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"examples": get_test_nested_examples()})
        ]

    def _build_pcollection(self, pipeline, examples):
        # Imported lazily so the module loads without apache_beam installed.
        import apache_beam as beam

        return pipeline | "Load Examples" >> beam.Create(examples)
def get_test_dummy_examples():
    """Return the (key, example) pairs consumed by DummyBeamDataset.

    Renamed from the mangled `a__` — the builders and tests above already
    call it by this name.
    """
    return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"])]
def get_test_nested_examples():
    """Return the nested (key, example) pairs consumed by NestedBeamDataset."""
    return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"])]
class BeamBuilderTest(TestCase):
    """End-to-end tests for Beam-based builders: prepare, write arrow +
    metadata, and reload (base class restored from the `TestCase` import; the
    mangled original inherited from an undefined `__a`)."""

    @require_beam
    def test_download_and_prepare(self):
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_dummy_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_dummy_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_download_and_prepare_sharded(self):
        import apache_beam as beam

        original_write_parquet = beam.io.parquetio.WriteToParquet
        expected_num_examples = len(get_test_dummy_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            # Force two shards by patching the parquet writer.
            with patch("apache_beam.io.parquetio.WriteToParquet") as write_parquet_mock:
                write_parquet_mock.side_effect = partial(original_write_parquet, num_shards=2)
                builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00000-of-00002.arrow")))
            # Fixed: the original asserted shard 00000 twice instead of checking 00001.
            self.assertTrue(
                os.path.exists(
                    os.path.join(
                        tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train-00001-of-00002.arrow")))
            self.assertDictEqual(builder.info.features, datasets.Features({"content": datasets.Value("string")}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            # Order is not preserved when sharding, so we just check that all the elements are there
            self.assertListEqual(sorted(dset["train"]["content"]), sorted(["foo", "bar", "foobar"]))
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset

    @require_beam
    def test_no_beam_options(self):
        # Without a beam_runner the builder must refuse to prepare.
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = DummyBeamDataset(cache_dir=tmp_cache_dir)
            self.assertRaises(datasets.builder.MissingBeamOptions, builder.download_and_prepare)

    @require_beam
    def test_nested_features(self):
        expected_num_examples = len(get_test_nested_examples())
        with tempfile.TemporaryDirectory() as tmp_cache_dir:
            builder = NestedBeamDataset(cache_dir=tmp_cache_dir, beam_runner="DirectRunner")
            builder.download_and_prepare()
            self.assertTrue(
                os.path.exists(
                    os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", f"{builder.name}-train.arrow")))
            self.assertDictEqual(
                builder.info.features, datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string")})}))
            dset = builder.as_dataset()
            self.assertEqual(dset["train"].num_rows, expected_num_examples)
            self.assertEqual(dset["train"].info.splits["train"].num_examples, expected_num_examples)
            self.assertDictEqual(dset["train"][0], get_test_nested_examples()[0][1])
            self.assertDictEqual(
                dset["train"][expected_num_examples - 1], get_test_nested_examples()[expected_num_examples - 1][1])
            self.assertTrue(
                os.path.exists(os.path.join(tmp_cache_dir, builder.name, "default", "0.0.0", "dataset_info.json")))
            del dset
| 75 | 0 |
'''simple docstring'''
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    """Donut-based document question answering tool (encode → generate →
    decode); class/attribute names restored from the mangled placeholders."""

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """Build decoder prompt ids and pixel values for one (document, question)."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Greedy generation (num_beams=1) of the answer token sequence."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens and parse the Donut output into the answer."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # Fixed: the mangled `tokenajson` is the processor's `token2json`.
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 665 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
UpperCamelCase__ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'''
def get_user_input():
    """Prompt for the compute environment and dispatch to the matching
    questionnaire (SageMaker or local cluster); returns the config object.

    Renamed from the mangled `a__` — `config_command` below calls it by this
    name.
    """
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Create the `accelerate config` argument parser, either standalone or
    attached to `subparsers`."""
    # `UpperCamelCase__` is this module's help-text constant; the mangled
    # original passed the `subparsers` argument itself as the description.
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=UpperCamelCase__)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=UpperCamelCase__)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # Route `accelerate config ...` invocations to config_command.
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive questionnaire and write the resulting config to
    `args.config_file` (or the default YAML location, creating the cache dir
    if needed). JSON vs YAML is chosen by file extension."""
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    """CLI entry point: parse arguments and run the config command."""
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
# Allow running this module directly as well as through the accelerate CLI.
if __name__ == "__main__":
    main()
| 75 | 0 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowercase = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    """Deprecated alias of `SegformerImageProcessor` kept for backward
    compatibility (this is the name the deprecation message refers to)."""

    def __init__(self, *args, **kwargs) -> None:
        # Fixed: duplicate *__A/**__A parameter names, and the warning
        # category was an undefined `_A` — deprecations use FutureWarning.
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 443 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def a__ ( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ) -> None:
    """Convert a TensorFlow GPT-2 checkpoint into a PyTorch model directory.

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: optional JSON config path; empty string means use
            the library's default ``GPTaConfig``.
        pytorch_dump_folder_path: directory receiving the weights + config.

    NOTE(review): the original signature named all three parameters
    ``lowerCAmelCase__`` (duplicate argument names — a SyntaxError); the
    names here are recovered from how the body uses them.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)

    # Load weights from the TF checkpoint into the freshly built model
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)

    # Save pytorch-model (weights and config are written separately)
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    # NOTE(review): the original assigned the parser/args to `UpperCamelCase__`
    # while reading `parser`, and finally called the undefined name
    # `convert_gpta_checkpoint_to_pytorch` with non-existent `args.gpta_*`
    # attributes (argparse derives them from the option strings: `gpt2_*`).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    parser.add_argument(
        '''--gpt2_config_file''',
        default='''''',
        type=str,
        help=(
            '''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    args = parser.parse_args()
    # The converter is defined above as `a__`.
    a__(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 75 | 0 |
# Lazy/optional-dependency wiring for the spectrogram-diffusion pipeline
# package: the real implementations are only imported when the heavy optional
# dependencies are installed; otherwise dummy placeholders are exposed.
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable

try:
    # Core pipeline requires both `transformers` and `torch`.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Expose dummy objects that raise an informative error on use.
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    # NOTE(review): SpectrogramContEncoder is imported twice (from
    # .continous_encoder above and again from the pipeline module below, where
    # the second binding wins) — confirm this duplication is intentional.
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )

try:
    # MIDI utilities additionally require the `note_seq` package.
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
    from .midi_utils import MidiProcessor
| 99 |
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class TFLayoutLMvaModelTester:
    """Builds a tiny LayoutLMv3 config plus random inputs and runs shape checks.

    ``parent`` is the driving ``unittest.TestCase`` (it supplies the
    ``assert*`` methods). The class/method/variable names below are recovered
    from the call sites in this file (e.g. ``TFLayoutLMvaModelTester(self)``
    and ``self.model_tester.create_and_check_model(...)``): the original text
    mangled them into duplicate `_A` parameters (a SyntaxError) and throwaway
    `UpperCAmelCase__` assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1_000,
    ):
        """Store the (tiny) model hyper-parameters used by every helper below."""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        """Return a tiny config plus random input tensors (ids, boxes, image, masks, labels)."""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        bbox = bbox.numpy()
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1,
        # so swap coordinate pairs that came out reversed from the random draw.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox)

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask):
        """Check output shapes of the base model for text+image, text-only, image-only inputs."""
        model = TFLayoutLMvaModel(config=config)

        # text + image
        result = model(input_ids, pixel_values=pixel_values, training=False)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, training=False, )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size))

        # image only
        result = model({'''pixel_values''': pixel_values}, training=False)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check the logits shape of the sequence-classification head."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels
    ):
        """Check the logits shape of the token-classification head (text tokens only)."""
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, training=False, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels
    ):
        """Check start/end logits shapes of the question-answering head."""
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, training=False, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMvaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the TF LayoutLMv3 family.

    NOTE(review): the original text gave every test method the same mangled
    name (`lowercase_`, so they shadowed each other and unittest never ran
    them), used duplicate `_A` parameters (a SyntaxError) and undefined `_A`
    references, and named the mixin bases `__a`. Names below are restored
    from the imports and call sites in this file.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {'''document-question-answering''': TFLayoutLMvaForQuestionAnswering, '''feature-extraction''': TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    # Features not supported/exercised for this architecture.
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip every pipeline test for this model family."""
        return True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        """Adapt the common inputs_dict to `model_class` (tile for multiple choice, add labels)."""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Tile every tensor along a new "choices" axis.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['''labels'''] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['''start_positions'''] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict['''end_positions'''] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING):
                inputs_dict['''labels'''] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length), dtype=tf.int32)

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMvaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        """Verify hf_compute_loss models compute a loss via kwargs, masked labels, dict and tuple inputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, '''hf_compute_loss''', None):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys(), reverse=True)[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('''input_ids''')
                loss = model(input_ids, **prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                input_ids = prepared_for_class.pop('''input_ids''')
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['''labels'''].numpy()
                    if len(labels.shape) > 1 and labels.shape[1] != 1:
                        # Mask the first position with the ignore index; loss must stay finite.
                        labels[0] = -100
                        prepared_for_class['''labels'''] = tf.convert_to_tensor(labels)
                        loss = model(input_ids, **prepared_for_class)[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])
                        self.assertTrue(not np.any(np.isnan(loss.numpy())))

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                loss = model(prepared_for_class)[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call).parameters
                signature_names = list(signature.keys())

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: '''input_ids'''}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key)
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default)
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input)
                # Send to model
                loss = model(tuple_input[:-1])[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1])

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for embedding_type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = embedding_type
            self.model_tester.create_and_check_model(config, input_ids, bbox, pixel_values, token_type_ids, input_mask)

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, token_labels)

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def a__ ( ) -> List[str]:
    """Load and return the COCO cats fixture image used by the slow integration test."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow integration test for the pretrained TFLayoutLMv3 base model.

    NOTE(review): the original text mangled the property name to `lowercase_`
    although the test body reads ``self.default_image_processor``, and
    replaced several argument values with the undefined name ``_A``; both are
    restored below.
    """

    @cached_property
    def default_image_processor(self):
        # apply_ocr=False because the test supplies its own token ids and boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        """Forward a tiny text+image input and check the last hidden state values."""
        model = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''')

        image_processor = self.default_image_processor
        # The COCO fixture loader is defined above (its original `prepare_img`
        # name was mangled to `a__`).
        image = a__()
        pixel_values = image_processor(images=image, return_tensors='''tf''').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 75 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCamelCase ( DiffusionPipeline ):
    '''
    Text-guided inpainting pipeline: CLIPSeg segments the region described by
    `text`, and the resulting mask drives a Stable Diffusion inpainting pass.

    NOTE(review): the original text named every __init__/__call__ parameter
    `a__` (duplicate argument names — a SyntaxError), every method `a_`
    (so they shadowed each other), and the base class `__a`. Names below are
    recovered from the body's own reads (`scheduler`, `prompt`, ...,
    `self.enable_attention_slicing`) and the imports at the top of the file.
    '''

    def __init__( self , segmentation_model , segmentation_processor , vae , text_encoder , tokenizer , unet , scheduler , safety_checker , feature_extractor , ):
        super().__init__()

        # Legacy schedulers used steps_offset=0; patch the frozen config and warn.
        if hasattr(scheduler.config , "steps_offset" ) and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                '''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
                ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
                ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
                ''' file'''
            )
            deprecate("steps_offset!=1" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config )

        # PNDM-style schedulers must skip PRK steps for inpainting; patch and warn.
        if hasattr(scheduler.config , "skip_prk_steps" ) and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file'''
            )
            deprecate("skip_prk_steps not set" , "1.0.0" , deprecation_message , standard_warn=False )
            new_config = dict(scheduler.config )
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config )

        if safety_checker is None:
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )

        self.register_modules(
            segmentation_model=segmentation_model , segmentation_processor=segmentation_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , )

    def enable_attention_slicing( self , slice_size = "auto" ):
        """Compute attention in slices to trade speed for memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing( self ):
        """Restore full (un-sliced) attention computation."""
        self.enable_attention_slicing(None )

    def enable_sequential_cpu_offload( self ):
        """Offload sub-models to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )

        device = torch.device("cuda" )

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
        """Device modules actually execute on (accounts for accelerate hooks)."""
        if self.device != torch.device("meta" ) or not hasattr(self.unet , "_hf_hook" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , "_hf_hook" )
                and hasattr(module._hf_hook , "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__( self , prompt , image , text , height = 512 , width = 512 , num_inference_steps = 50 , guidance_scale = 7.5 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , output_type = "pil" , return_dict = True , callback = None , callback_steps = 1 , **kwargs , ):
        """Segment the region described by `text` in `image`, then inpaint it with `prompt`."""
        # CLIPSeg produces per-pixel logits for the text query; sigmoid turns
        # them into a soft mask, which is resized back to the input image size.
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding="max_length" , return_tensors="pt" ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 211 |
"""Open the top Google search result for a query in the default web browser."""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote

import requests
from bs4 import BeautifulSoup  # was misspelled `bsa`, which raised ModuleNotFoundError
from fake_useragent import UserAgent

if __name__ == "__main__":
    # NOTE(review): the original assigned every value to `UpperCamelCase__`
    # while reading `query`/`url`/`res`/`link` — all NameErrors. De-mangled.
    # Query comes from the CLI when given, otherwise prompt interactively.
    query = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
    print('''Googling.....''')
    url = F"""https://www.google.com/search?q={query}&num=100"""
    # Randomized User-Agent reduces the chance of being served a bot page.
    res = requests.get(
        url,
        headers={'''User-Agent''': str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''yuRUbf'''})
            .find('''a''')
            .get('''href''')
        )
    except AttributeError:
        # Fallback layout: the target URL is the `url` query parameter of a
        # Google redirect link.
        link = parse_qs(
            BeautifulSoup(res.text, '''html.parser''')
            .find('''div''', attrs={'''class''': '''kCrYT'''})
            .find('''a''')
            .get('''href''')
        )['''url'''][0]
    webbrowser.open(link)
| 75 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.