| code (string, lengths 86–54.5k) | code_codestyle (int64, 0–371) | style_context (string, lengths 87–49.2k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
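The five columns above are the schema: a code sample, an integer style id for it, a second sample used as style context, that sample's style id, and a binary label. A minimal sketch of consuming a dataset with this schema via the `datasets` library follows; the dataset id is a placeholder assumption, only the column names come from the header.

```python
# Sketch only: "examples/code-style-pairs" is a hypothetical dataset id,
# not a real repository; the column names come from the schema above.
from datasets import load_dataset

ds = load_dataset("examples/code-style-pairs", split="train")
for row in ds.select(range(2)):  # peek at the first two rows
    print(len(row["code"]), row["code_codestyle"],
          len(row["style_context"]), row["style_context_codestyle"], row["label"])
```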
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPT2Tokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(image_processor, tokenizer, qformer_tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(),
            image_processor=self.get_image_processor(),
            qformer_tokenizer=self.get_qformer_tokenizer(),
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

| 210 |
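The processor contract the tests above verify can be seen end to end with a real checkpoint. This is a hedged usage sketch, not part of the test suite; the checkpoint choice is illustrative.

```python
# Hedged sketch (not part of the test above): the processor merges the
# language tokenizer's keys, the Q-Former tokenizer's keys (prefixed with
# "qformer_"), and the image processor's pixel values into one dict.
import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text="lower newer", images=image, return_tensors="pt")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values',
#  'qformer_attention_mask', 'qformer_input_ids']
```

from typing import TYPE_CHECKING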
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""tokenization_bloom_fast"""] = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_bloom"""] = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 210 | 1 |
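The module above is an instance of transformers' lazy-import pattern: nothing heavy is imported until an exported attribute is first touched. Below is a minimal sketch of the same idea in plain Python; it is not transformers' actual `_LazyModule`, and it assumes it lives inside a package so relative imports resolve.

```python
# Minimal sketch of the lazy-import idea (not transformers' implementation):
# defer submodule imports until first attribute access, then cache.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported name to the submodule that defines it
        self._name_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # only now pay the import cost, then cache the result on the module
        module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value
```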
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew  and\n      Dorr, Bonnie  and\n      Schwartz, Rich  and\n      Micciulla, Linnea  and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                     "what about this sentence?",\n        ...                     "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")

        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 352 |
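Stripped of the metric wrapper, the `_compute` above reduces to one transpose plus one sacrebleu call. A hedged standalone sketch; the expected numbers come from the docstring's second example.

```python
# Direct sacrebleu equivalent of the metric's _compute.
from sacrebleu import TER

predictions = ["does this sentence match??", "what about this sentence?"]
references = [  # one list of references per prediction (datasets convention)
    ["does this sentence match", "does this sentence match!?!"],
    ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
]
# sacrebleu expects one stream per reference index, so transpose first
streams = [[refs[i] for refs in references] for i in range(len(references[0]))]
output = TER(case_sensitive=True).corpus_score(predictions, streams)
print(output.score, output.num_edits, output.ref_length)  # 62.5 5 8.0
```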
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
def __init__( self : Tuple , __magic_name__ : List[str] , __magic_name__ : str=13 , __magic_name__ : int=10 , __magic_name__ : Any=3 , __magic_name__ : List[Any]=2 , __magic_name__ : List[Any]=2 , __magic_name__ : Union[str, Any]=True , __magic_name__ : Union[str, Any]=True , __magic_name__ : Any=32 , __magic_name__ : int=5 , __magic_name__ : Optional[int]=4 , __magic_name__ : List[Any]=37 , __magic_name__ : Dict="gelu" , __magic_name__ : List[Any]=0.1 , __magic_name__ : Optional[int]=0.1 , __magic_name__ : Any=10 , __magic_name__ : List[str]=0.02 , __magic_name__ : Optional[Any]="divided_space_time" , __magic_name__ : int=None , ) -> List[str]:
"""simple docstring"""
__snake_case : List[Any] = parent
__snake_case : List[str] = batch_size
__snake_case : Union[str, Any] = image_size
__snake_case : List[Any] = num_channels
__snake_case : List[str] = patch_size
__snake_case : List[str] = num_frames
__snake_case : Union[str, Any] = is_training
__snake_case : List[str] = use_labels
__snake_case : str = hidden_size
__snake_case : Union[str, Any] = num_hidden_layers
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Dict = intermediate_size
__snake_case : Tuple = hidden_act
__snake_case : Optional[Any] = hidden_dropout_prob
__snake_case : Optional[int] = attention_probs_dropout_prob
__snake_case : Union[str, Any] = attention_type
__snake_case : Optional[Any] = initializer_range
__snake_case : Optional[Any] = scope
__snake_case : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__snake_case : str = (image_size // patch_size) ** 2
__snake_case : Optional[Any] = (num_frames) * self.num_patches_per_frame + 1
def lowercase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__snake_case : Optional[int] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__snake_case : int = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case : int = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__snake_case : Any = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__snake_case : str = self.num_labels
return config
def lowercase__ ( self : List[Any] , __magic_name__ : Tuple , __magic_name__ : Tuple , __magic_name__ : Dict ) -> int:
"""simple docstring"""
__snake_case : Optional[int] = TimesformerModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Tuple = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Any , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : Optional[int] ) -> str:
"""simple docstring"""
__snake_case : Any = TimesformerForVideoClassification(__magic_name__ )
model.to(__magic_name__ )
model.eval()
__snake_case : Optional[int] = model(__magic_name__ )
# verify the logits shape
__snake_case : Dict = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __magic_name__ )
def lowercase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case : Tuple = config_and_inputs
__snake_case : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowercase__: Dict = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
lowercase__: List[Any] = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
lowercase__: List[str] = False
lowercase__: List[Any] = False
lowercase__: Dict = False
lowercase__: int = False
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case : List[str] = TimesformerModelTester(self )
__snake_case : List[Any] = ConfigTester(
self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 )
def lowercase__ ( self : Any , __magic_name__ : Tuple , __magic_name__ : List[str] , __magic_name__ : Union[str, Any]=False ) -> int:
"""simple docstring"""
__snake_case : Dict = copy.deepcopy(__magic_name__ )
if return_labels:
if model_class in get_values(__magic_name__ ):
__snake_case : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__magic_name__ )
return inputs_dict
def lowercase__ ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def lowercase__ ( self : List[str] ) -> Any:
"""simple docstring"""
pass
def lowercase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
__snake_case , __snake_case : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : str = model_class(__magic_name__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__snake_case : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) )
def lowercase__ ( self : Any ) -> int:
"""simple docstring"""
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Union[str, Any] = model_class(__magic_name__ )
__snake_case : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case : Union[str, Any] = [*signature.parameters.keys()]
__snake_case : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __magic_name__ )
def lowercase__ ( self : str ) -> Dict:
"""simple docstring"""
__snake_case : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def lowercase__ ( self : int ) -> List[str]:
"""simple docstring"""
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__magic_name__ )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : int = TimesformerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def lowercase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if not self.has_attentions:
pass
else:
__snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case : Dict = True
for model_class in self.all_model_classes:
__snake_case : List[str] = self.model_tester.seq_length
__snake_case : Tuple = self.model_tester.num_frames
__snake_case : str = True
__snake_case : List[str] = False
__snake_case : Tuple = True
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : List[str] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : Dict = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__snake_case : Optional[int] = True
__snake_case : Any = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Union[str, Any] = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__snake_case : int = len(__magic_name__ )
# Check attention is always last and order is fine
__snake_case : Optional[int] = True
__snake_case : Optional[int] = True
__snake_case : Union[str, Any] = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Dict = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + 1 , len(__magic_name__ ) )
__snake_case : List[Any] = outputs.attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowercase__ ( self : Dict ) -> int:
"""simple docstring"""
def check_hidden_states_output(__magic_name__ : List[str] , __magic_name__ : List[str] , __magic_name__ : Optional[Any] ):
__snake_case : str = model_class(__magic_name__ )
model.to(__magic_name__ )
model.eval()
with torch.no_grad():
__snake_case : Tuple = model(**self._prepare_for_class(__magic_name__ , __magic_name__ ) )
__snake_case : int = outputs.hidden_states
__snake_case : Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__magic_name__ ) , __magic_name__ )
__snake_case : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__snake_case , __snake_case : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case : Dict = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case : str = True
check_hidden_states_output(__magic_name__ , __magic_name__ , __magic_name__ )
def _a ( ) -> List[Any]:
"""simple docstring"""
__snake_case : Optional[Any] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__snake_case : List[Any] = np.load(_lowerCamelCase )
return list(_lowerCamelCase )
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
@cached_property
def lowercase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__snake_case : int = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
__magic_name__ )
__snake_case : Union[str, Any] = self.default_image_processor
__snake_case : Dict = prepare_video()
__snake_case : Any = image_processor(video[:8] , return_tensors="""pt""" ).to(__magic_name__ )
# forward pass
with torch.no_grad():
__snake_case : Any = model(**__magic_name__ )
# verify the logits
__snake_case : int = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , __magic_name__ )
__snake_case : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__magic_name__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
| 13 | 0 |
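The only non-obvious arithmetic in the Timesformer tester above is the sequence length; with the tester's defaults it works out as follows.

```python
# Shape bookkeeping from the tester above: TimeSformer flattens each frame
# into patches and prepends a single CLS token.
image_size, patch_size, num_frames = 10, 2, 2              # tester defaults
num_patches_per_frame = (image_size // patch_size) ** 2    # 5 * 5 = 25
seq_length = num_frames * num_patches_per_frame + 1        # 2 * 25 + 1 = 51
print(num_patches_per_frame, seq_length)                   # 25 51
```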
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
"""simple docstring"""
def __init__( self :Optional[Any] , lowercase_ :int , lowercase_ :Tuple=13 , lowercase_ :Any=7 , lowercase_ :List[str]=True , lowercase_ :Optional[int]=True , lowercase_ :str=False , lowercase_ :Optional[int]=True , lowercase_ :Union[str, Any]=99 , lowercase_ :Optional[int]=32 , lowercase_ :int=5 , lowercase_ :int=4 , lowercase_ :Optional[int]=64 , lowercase_ :Union[str, Any]="gelu" , lowercase_ :Union[str, Any]=0.1 , lowercase_ :List[Any]=0.1 , lowercase_ :Union[str, Any]=5_12 , lowercase_ :Tuple=16 , lowercase_ :Union[str, Any]=2 , lowercase_ :Optional[Any]=0.02 , lowercase_ :Union[str, Any]=3 , lowercase_ :int=4 , lowercase_ :Any=None , lowercase_ :Any=2 , lowercase_ :List[Any]=2 , lowercase_ :Optional[Any]=2 , lowercase_ :Any=2 , lowercase_ :int=4 , lowercase_ :Optional[int]=1 , ) -> int:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
UpperCAmelCase = q_groups
UpperCAmelCase = k_groups
UpperCAmelCase = v_groups
UpperCAmelCase = post_attention_groups
UpperCAmelCase = intermediate_groups
UpperCAmelCase = output_groups
def UpperCAmelCase__ ( self :Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[Any]:
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List[str] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Tuple ) -> str:
UpperCAmelCase = SqueezeBertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , lowercase_ )
UpperCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Optional[int] , lowercase_ :str , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :Dict , lowercase_ :Any ) -> int:
UpperCAmelCase = SqueezeBertForMaskedLM(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[Any] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :Optional[int] , lowercase_ :Dict , lowercase_ :List[str] ) -> Dict:
UpperCAmelCase = SqueezeBertForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :str , lowercase_ :List[str] , lowercase_ :int , lowercase_ :str , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] ) -> int:
UpperCAmelCase = self.num_labels
UpperCAmelCase = SqueezeBertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :Any , lowercase_ :Dict , lowercase_ :Optional[int] , lowercase_ :Tuple ) -> Any:
UpperCAmelCase = self.num_labels
UpperCAmelCase = SqueezeBertForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self :int , lowercase_ :Dict , lowercase_ :Any , lowercase_ :Tuple , lowercase_ :Optional[Any] , lowercase_ :int , lowercase_ :str ) -> int:
UpperCAmelCase = self.num_choices
UpperCAmelCase = SqueezeBertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self :Optional[Any] ) -> Tuple:
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCamelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__UpperCamelCase = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = True
__UpperCamelCase = False
def UpperCAmelCase__ ( self :str ) -> Dict:
UpperCAmelCase = SqueezeBertModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=lowercase_ , dim=37 )
def UpperCAmelCase__ ( self :int ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self :List[str] ) -> Tuple:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowercase_ )
def UpperCAmelCase__ ( self :List[Any] ) -> Dict:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*lowercase_ )
def UpperCAmelCase__ ( self :str ) -> Union[str, Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowercase_ )
def UpperCAmelCase__ ( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*lowercase_ )
def UpperCAmelCase__ ( self :List[str] ) -> Any:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowercase_ )
@slow
def UpperCAmelCase__ ( self :Dict ) -> List[str]:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = SqueezeBertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self :Tuple ) -> int:
UpperCAmelCase = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
UpperCAmelCase = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] )
UpperCAmelCase = model(lowercase_ )[0]
UpperCAmelCase = torch.Size((1, 3) )
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase = torch.tensor([[0.6401, -0.0349, -0.6041]] )
self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-4 ) )
| 78 |
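The integration test at the end of the block above boils down to a three-line inference. A hedged standalone version; per the test, the logits should be close to [0.6401, -0.0349, -0.6041].

```python
# Hedged sketch of the MNLI integration check above.
import torch
from transformers import SqueezeBertForSequenceClassification

model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
with torch.no_grad():
    logits = model(input_ids)[0]
print(logits.shape)  # torch.Size([1, 3]) — one logit per MNLI class
```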
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """A SentencePiece-style Unigram tokenizer built on the `tokenizers` library."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(self, files: Union[str, List[str]], vocab_size: int = 8000, show_progress: bool = True):
        """Train the model on the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model on an in-memory iterator of text."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size, special_tokens=self.special_tokens_list, show_progress=show_progress
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        # The Unigram model's unk_id can only be set by round-tripping through JSON.
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
| 78 | 1 |
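Assuming the class above is in scope, training it from an in-memory corpus takes two calls; the printed tokens are illustrative, not exact.

```python
# Usage sketch for the class defined above.
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(["hello world", "hello tokenizers"], vocab_size=40)
print(tokenizer.encode("hello world").tokens)
```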
'''simple docstring'''
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    """Depth-first search that walks each unvisited edge once and records the path."""
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


def check_circuit_or_path(graph, max_node):
    """Classify the graph: 1 = Euler cycle, 2 = Euler path, 3 = neither."""
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| 242 |
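The rule `check_circuit_or_path` implements is the classical degree criterion: an Euler cycle needs zero odd-degree vertices, an Euler path exactly two. A quick check on the first example graph:

```python
# Degree criterion behind check_circuit_or_path, applied to g1 above.
graph = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
odd = [v for v, adj in graph.items() if len(adj) % 2 == 1]
print(len(odd), odd)  # 2 [1, 5] -> an Euler path exists, with endpoints 1 and 5
```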
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 242 | 1 |
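End to end, the class above is normally driven through `transformers.pipeline`. A hedged usage sketch with a public CLIP checkpoint and a commonly used test image:

```python
# Hedged usage sketch of the zero-shot image classification pipeline.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",  # two cats on a couch
    candidate_labels=["two cats", "a dog", "a plane"],
    hypothesis_template="This is a photo of {}.",
)
print(result[0]["label"], round(result[0]["score"], 3))
```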
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
a :Any = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
a :Dict = (
{
'feature-extraction': TFMobileBertModel,
'fill-mask': TFMobileBertForMaskedLM,
'question-answering': TFMobileBertForQuestionAnswering,
'text-classification': TFMobileBertForSequenceClassification,
'token-classification': TFMobileBertForTokenClassification,
'zero-shot': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
a :str = False
a :Any = False
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int]=False ) -> Optional[int]:
lowercase_ = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowercase_ = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class TFMobileBertModelTester(object):
"""simple docstring"""
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : int=1_3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : List[str]=9_9 , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Tuple=3_2 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_7 , SCREAMING_SNAKE_CASE_ : Optional[int]="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=5_1_2 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE_ : List[str]=2 , SCREAMING_SNAKE_CASE_ : List[str]=0.02 , SCREAMING_SNAKE_CASE_ : Any=3 , SCREAMING_SNAKE_CASE_ : Any=4 , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , ) -> Any:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = scope
lowercase_ = embedding_size
def _lowercase ( self : List[str] ) -> List[str]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str ) -> Tuple:
lowercase_ = TFMobileBertModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
lowercase_ = [input_ids, input_mask]
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple:
lowercase_ = TFMobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ) -> str:
lowercase_ = TFMobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> Optional[Any]:
lowercase_ = TFMobileBertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = TFMobileBertForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] ) -> str:
lowercase_ = self.num_choices
lowercase_ = TFMobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
lowercase_ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple ) -> Tuple:
lowercase_ = self.num_labels
lowercase_ = TFMobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
lowercase_ = TFMobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Any ) -> List[str]:
lowercase_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = lowercase_
lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def _lowercase ( self : int ) -> List[str]:
lowercase_ = TFMobileBertModelTest.TFMobileBertModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : List[str] ) -> Dict:
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Tuple:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> Optional[int]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[Any] ) -> Tuple:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> str:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : List[str] ) -> Dict:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Dict ) -> Any:
# for model_name in TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["google/mobilebert-uncased"]:
lowercase_ = TFMobileBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_tf
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : str ) -> Optional[Any]:
lowercase_ = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
lowercase_ = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = tf.constant(
[
[
[-4.5_91_95_47, -9.24_82_95, -9.64_52_56],
[-6.7_30_61_75, -6.44_02_84, -6.6_05_28_37],
[-7.2_74_35_06, -6.7_84_79_15, -6.02_46_73],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
| 30 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data['data'])
y = np.array(data['target'])
classes = data['target_names']
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a , b ):
    """simple docstring"""
    return np.linalg.norm(np.array(a ) - np.array(b ) )
def classifier(train_data , train_target , classes , point , k=5 ):
    """simple docstring"""
    data = zip(train_data , train_target )
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0] , point )
        distances.append((distance, data_point[1]) )
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances )[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes ).most_common(1 )[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
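    # Hedged extension (not in the original script): score the classifier on the
    # held-out split produced by train_test_split above.
    predictions = [classifier(X_train, y_train, classes, point) for point in X_test]
    truth = [classes[label] for label in y_test]
    print("held-out accuracy:", sum(p == t for p, t in zip(predictions, truth)) / len(truth))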
| 253 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_graphormer'''] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
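# Usage note (an assumption, not in this file): with the lazy module installed as
# `sys.modules[__name__]`, an import such as
#     from transformers.models.graphormer import GraphormerConfig
# resolves without eagerly importing the heavy modeling code; the
# `modeling_graphormer` submodule is only loaded on first attribute access.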
| 333 | import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def convert_tax_checkpoint_to_flax(tax_checkpoint_path , config_name , flax_dump_folder_path ):
    config = AutoConfig.from_pretrained(config_name )
    flax_model = FlaxAutoModelForSeqaSeqLM.from_config(config=config )
    tax_model = checkpoints.load_tax_checkpoint(tax_checkpoint_path )
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowercase__ : Any = "SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowercase__ : int = "LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Dict = "TransientGlobalSelfAttention"
else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global'].")
# Encoder
for layer_index in range(config.num_layers):
        lowercase__ : str = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : List[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowercase__ : Any = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Optional[Any] = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowercase__ : Tuple = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : Optional[int] = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : str = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : int = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        lowercase__ : int = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : Any = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[str] = tax_attention_value
lowercase__ : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Any = tax_global_layer_norm
if split_mlp_wi:
lowercase__ : Tuple = tax_mlp_wi_a
lowercase__ : str = tax_mlp_wi_a
else:
lowercase__ : List[Any] = tax_mlp_wi
lowercase__ : str = tax_mlp_wo
lowercase__ : int = tax_mlp_layer_norm
lowercase__ : List[str] = flax_model_encoder_layer_block
# Only for layer 0:
lowercase__ : Dict = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : Optional[int] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowercase__ : Tuple = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_encoder_global_rel_embedding
# Assigning
lowercase__ : Optional[int] = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowercase__ : Union[str, Any] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers):
        lowercase__ : Dict = f'''layers_{str(layer_index)}'''
# Self-Attention
lowercase__ : str = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowercase__ : Tuple = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowercase__ : List[str] = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowercase__ : int = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowercase__ : Any = tax_enc_dec_attention_module["key"]["kernel"]
lowercase__ : Union[str, Any] = tax_enc_dec_attention_module["out"]["kernel"]
lowercase__ : Any = tax_enc_dec_attention_module["query"]["kernel"]
lowercase__ : Tuple = tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowercase__ : Dict = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowercase__ : Union[str, Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowercase__ : Any = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowercase__ : List[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowercase__ : Optional[Any] = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowercase__ : Optional[int] = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
        lowercase__ : Optional[Any] = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
lowercase__ : Any = tax_attention_key
lowercase__ : List[Any] = tax_attention_out
lowercase__ : Any = tax_attention_query
lowercase__ : List[Any] = tax_attention_value
lowercase__ : List[str] = tax_pre_attention_layer_norm
lowercase__ : List[Any] = tax_enc_dec_attention_key
lowercase__ : Optional[Any] = tax_enc_dec_attention_out
lowercase__ : str = tax_enc_dec_attention_query
lowercase__ : Union[str, Any] = tax_enc_dec_attention_value
lowercase__ : Tuple = tax_cross_layer_norm
if split_mlp_wi:
lowercase__ : List[str] = tax_mlp_wi_a
lowercase__ : List[Any] = tax_mlp_wi_a
else:
lowercase__ : Tuple = tax_mlp_wi
lowercase__ : Any = tax_mlp_wo
lowercase__ : Tuple = txa_mlp_layer_norm
lowercase__ : int = flax_model_decoder_layer_block
# Decoder Normalization
lowercase__ : str = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowercase__ : List[Any] = txa_decoder_norm
# Only for layer 0:
lowercase__ : List[str] = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowercase__ : str = tax_decoder_rel_embedding
# Token Embeddings
lowercase__ : Optional[Any] = tax_model["target"]["token_embedder"]["embedding"]
lowercase__ : Optional[Any] = txa_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowercase__ : Optional[int] = tax_model["target"]["decoder"]["logits_dense"]["kernel"]
    flax_model.save_pretrained(flax_dump_folder_path )
print("T5X Model was sucessfully converted!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
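# Example invocation (the script name, paths, and config name below are
# illustrative placeholders, not from the source):
#   python convert_t5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /tmp/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /tmp/flax_dump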
| 333 | 1 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase__ : Optional[str] = field(
default='./' ,metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for training.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.1 ,metadata={'help': 'Value of weight decay.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0 ,metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase__ : Optional[float] = field(default=2E-4 ,metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase__ : Optional[str] = field(default='cosine' ,metadata={'help': 'Learning rate schedule type.'} )
lowerCamelCase__ : Optional[int] = field(
default=7_5_0 ,metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_6 ,metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase__ : Optional[int] = field(default=5_0_0_0_0 ,metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Training seed.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_2_4 ,metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} ,)
lowerCamelCase__ : Optional[str] = field(
default=A__ ,metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' ,metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase__ : Optional[int] = field(default=2 ,metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0_2_4 ,metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase__ : Optional[float] = field(default=0.2 ,metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase__ : Optional[int] = field(default=2_5_6 ,metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase__ : Optional[int] = field(default=0 ,metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase__ : Optional[float] = field(default=0.9_5 ,metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase__ : Optional[int] = field(default=1_0 ,metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase__ : Optional[int] = field(
default=2_0_0 ,metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase__ : Optional[int] = field(default=1 ,metadata={'help': 'Random seed used for evaluation.'} )
    lowerCamelCase__ : Optional[str] = field(
        default='eval_results.json' ,metadata={'help': 'Name of the file to save evaluation results to.'} )
lowerCamelCase__ : Optional[str] = field(
default='0' ,metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase__ : Optional[int] = field(
default=-1 ,metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} ,)
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[int] = field(
default=A__ ,metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} ,)
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot' ,metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase__ : Optional[str] = field(
        default='codeparrot-clean' ,metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase__ : Optional[int] = field(
default=1_0_0_0_0_0 ,metadata={'help': 'Number of files to save per JSON output file.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0_0 ,metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1_0_0 ,metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.2_5 ,metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=1.5 ,metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.7 ,metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} ,)
lowerCamelCase__ : Optional[bool] = field(
default=A__ ,metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase__ : Optional[float] = field(
default=0.8_5 ,metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='gpt2' ,metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase__ : Optional[str] = field(
default='transformersbook/codeparrot-train' ,metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase__ : Optional[str] = field(default='content' ,metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase__ : Optional[int] = field(default=2_0_0_0_0_0 ,metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase__ : Optional[int] = field(
        default=3_2_7_6_8 ,metadata={'help': 'Vocabulary size of the new tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' ,metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase__ : Optional[str] = field(
default='tokenized-codeparrot-train' ,metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase__ : Optional[int] = field(default=A__ ,metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class lowerCamelCase :
lowerCamelCase__ : Optional[str] = field(
default='gpt2-large' ,metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase__ : Optional[str] = field(
default='codeparrot/codeparrot' ,metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase__ : Optional[str] = field(default='codeparrot' ,metadata={'help': 'Name of the created model.'} )
lowerCamelCase__ : Optional[bool] = field(default=A__ ,metadata={'help': 'Push saved tokenizer to the hub.'} )
| 165 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def A ( voltage , current , power ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
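    # Worked example (added; not from the source): with voltage = 0, current = 2 A
    # and power = 10 W, the function solves V = P / I = 5 V.
    print(A(voltage=0, current=2, power=10))  # result(name='voltage', value=5.0)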
| 165 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCAmelCase = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 356 | """simple docstring"""
from __future__ import annotations
import math
def prime_sieve ( num : int ) -> list[int]:
    if num <= 0:
        msg = F'''{num}: Invalid input, please enter a positive integer.'''
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip())))
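    # Sanity check (added; not in the original): the primes up to 30.
    assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]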
| 54 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok , src_examples , tgt_examples , max_tokens=1024 ):
    '''simple docstring'''
    finished_src , finished_tgt = [], []
    sorted_examples = list(zip(src_examples , tgt_examples ) )
    new_src , new_tgt = sorted_examples[0]
    def is_too_big(text ):
        return tok(text , return_tensors="pt" ).input_ids.shape[1] > max_tokens
    for src, tgt in tqdm(sorted_examples[1:] ):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src ) or is_too_big(cand_tgt ):  # cant fit, finalize example
            finished_src.append(new_src )
            finished_tgt.append(new_tgt )
            new_src , new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src , new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src )
        finished_tgt.append(new_tgt )
    return finished_src, finished_tgt
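# Illustration of the greedy packing above (hypothetical toy inputs, not from the
# source): with a token budget that fits two pairs but not three, sources
# ["a b", "c d", "e f"] are packed into ["a b c d", "e f"] -- consecutive
# examples are concatenated until the tokenized length would exceed max_tokens.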
def pack_data_dir(tok , data_dir , max_tokens , save_path ):
    '''simple docstring'''
    save_path = Path(save_path )
    save_path.mkdir(exist_ok=True )
    for split in ["train"]:
        src_path , tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path ).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path ).open().readlines()]
        packed_src , packed_tgt = pack_examples(tok , src_docs , tgt_docs , max_tokens )
        print(f"""packed {split} split from {len(src_docs )} examples -> {len(packed_src )}.""" )
        Path(save_path / f"""{split}.source""" ).open("w" ).write("\n".join(packed_src ) )
        Path(save_path / f"""{split}.target""" ).open("w" ).write("\n".join(packed_tgt ) )
    for split in ["val", "test"]:
        src_path , tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path , save_path / f"""{split}.source""" )
        shutil.copyfile(tgt_path , save_path / f"""{split}.target""" )
def packer_cli():
    '''simple docstring'''
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name" , type=str , help="like facebook/bart-large-cnn,t5-base, etc." )
    parser.add_argument("--max_seq_len" , type=int , default=128 )
    parser.add_argument("--data_dir" , type=str )
    parser.add_argument("--save_path" , type=str )
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name )
    return pack_data_dir(tokenizer , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
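# Example invocation (script name and paths are illustrative placeholders, not
# from the source):
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 128 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed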
| 54 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class A_ :
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=1 , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=3 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase = TaEncoderModel.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-t5''' )
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
sample_size=3_2 , layers_per_block=[1, 2] , block_out_channels=[3_2, 6_4] , down_block_types=[
'''ResnetDownsampleBlock2D''',
'''SimpleCrossAttnDownBlock2D''',
] , mid_block_type='''UNetMidBlock2DSimpleCrossAttn''' , up_block_types=['''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''] , in_channels=6 , out_channels=6 , cross_attention_dim=3_2 , encoder_hid_dim=3_2 , attention_head_dim=8 , addition_embed_type='''text''' , addition_embed_type_num_heads=2 , cross_attention_norm='''group_norm''' , resnet_time_scale_shift='''scale_shift''' , act_fn='''gelu''' , class_embed_type='''timestep''' , mid_block_scale_factor=1.4_14 , time_embedding_act_fn='''gelu''' , time_embedding_dim=3_2 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , thresholding=_A , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type='''epsilon''' , variance_type='''learned_range''' , )
torch.manual_seed(0 )
UpperCAmelCase = DDPMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''squaredcos_cap_v2''' , beta_start=0.00_01 , beta_end=0.02 , )
torch.manual_seed(0 )
UpperCAmelCase = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''prompt''']
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
if "image" in inputs:
UpperCAmelCase = inputs['''image''']
else:
UpperCAmelCase = None
if "mask_image" in inputs:
UpperCAmelCase = inputs['''mask_image''']
else:
UpperCAmelCase = None
if "original_image" in inputs:
UpperCAmelCase = inputs['''original_image''']
else:
UpperCAmelCase = None
UpperCAmelCase , UpperCAmelCase = pipe.encode_prompt(_A )
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
# set all optional components to None
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"""`{optional_component}` did not stay set to None after loading.""" , )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = inputs['''generator''']
UpperCAmelCase = inputs['''num_inference_steps''']
UpperCAmelCase = inputs['''output_type''']
# inputs with prompt converted to embeddings
UpperCAmelCase = {
'''prompt_embeds''': prompt_embeds,
'''negative_prompt_embeds''': negative_prompt_embeds,
'''generator''': generator,
'''num_inference_steps''': num_inference_steps,
'''output_type''': output_type,
}
if image is not None:
UpperCAmelCase = image
if mask_image is not None:
UpperCAmelCase = mask_image
if original_image is not None:
UpperCAmelCase = original_image
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
UpperCAmelCase = self.get_dummy_inputs(_A )
UpperCAmelCase = pipe_loaded(**_A )[0]
UpperCAmelCase = np.abs(to_np(_A ) - to_np(_A ) ).max()
self.assertLess(_A , 1E-4 )
| 273 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
__A = logging.get_logger(__name__)
def __A ( _lowercase , _lowercase ):
'''simple docstring'''
def run_func(_lowercase ):
@wraps(_lowercase )
def run_in_eager_mode(*_lowercase , **_lowercase ):
return func(*_lowercase , **_lowercase )
@wraps(_lowercase )
@tf.function(experimental_compile=_lowercase )
def run_in_graph_mode(*_lowercase , **_lowercase ):
return func(*_lowercase , **_lowercase )
if do_eager_mode is True:
if use_xla is not False:
raise ValueError(
'''Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.''' )
return run_in_eager_mode
else:
return run_in_graph_mode
return run_func
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = random.Random()
_A = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(_lowercase , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class SCREAMING_SNAKE_CASE ( snake_case ):
"""simple docstring"""
A_ = 42
A_ = 42
A_ = "TensorFlow"
@property
def __A ( self: Optional[int] ) -> List[str]:
return tf.__version__
def __A ( self: int , __A: str , __A: int , __A: int ) -> float:
# initialize GPU on separate process
_A = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_A = self._prepare_inference_func(__A , __A , __A )
return self._measure_speed(_inference )
def __A ( self: Optional[int] , __A: str , __A: int , __A: int ) -> float:
_A = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_A = self._prepare_train_func(__A , __A , __A )
return self._measure_speed(_train )
def __A ( self: Union[str, Any] , __A: str , __A: int , __A: int ) -> [Memory, Optional[MemorySummary]]:
# initialize GPU on separate process
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A )
_A = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_A = self._prepare_inference_func(__A , __A , __A )
return self._measure_memory(_inference )
def __A ( self: Dict , __A: str , __A: int , __A: int ) -> [Memory, Optional[MemorySummary]]:
if self.args.is_gpu:
tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __A )
_A = self.args.strategy
if strategy is None:
raise ValueError('''A device strategy has to be initialized before using TensorFlow.''' )
_A = self._prepare_train_func(__A , __A , __A )
return self._measure_memory(_train )
def __A ( self: Union[str, Any] , __A: str , __A: int , __A: int ) -> Callable[[], None]:
_A = self.config_dict[model_name]
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_A = (
hasattr(__A , '''architectures''' )
and isinstance(config.architectures , __A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_A = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_A = __import__('''transformers''' , fromlist=[model_class] )
_A = getattr(__A , __A )
_A = model_cls(__A )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_A = TF_MODEL_MAPPING[config.__class__](__A )
# encoder-decoder has vocab size saved differently
_A = config.vocab_size if hasattr(__A , '''vocab_size''' ) else config.encoder.vocab_size
_A = random_input_ids(__A , __A , __A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_forward():
return model(__A , decoder_input_ids=__A , training=__A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_forward():
return model(__A , training=__A )
_A = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
return _inference
def __A ( self: str , __A: str , __A: int , __A: int ) -> Callable[[], None]:
_A = self.config_dict[model_name]
if self.args.eager_mode is not False:
raise ValueError('''Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.''' )
if self.args.fpaa:
raise NotImplementedError('''Mixed precision is currently not supported.''' )
_A = (
hasattr(__A , '''architectures''' )
and isinstance(config.architectures , __A )
and len(config.architectures ) > 0
)
if not self.args.only_pretrain_model and has_model_class_in_config:
try:
_A = '''TF''' + config.architectures[0] # prepend 'TF' for tensorflow model
_A = __import__('''transformers''' , fromlist=[model_class] )
_A = getattr(__A , __A )
_A = model_cls(__A )
except ImportError:
raise ImportError(
f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
''' set `--only_pretrain_model` or `args.only_pretrain_model=True`.''' )
else:
_A = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__A )
# encoder-decoder has vocab size saved differently
_A = config.vocab_size if hasattr(__A , '''vocab_size''' ) else config.encoder.vocab_size
_A = random_input_ids(__A , __A , __A )
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_decoder_train():
_A = model(__A , decoder_input_ids=__A , labels=__A , training=__A )[0]
_A = tf.gradients(__A , model.trainable_variables )
return gradients
@run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
def encoder_train():
_A = model(__A , labels=__A , training=__A )[0]
_A = tf.gradients(__A , model.trainable_variables )
return gradients
_A = encoder_decoder_train if config.is_encoder_decoder else encoder_train
return _train
def __A ( self: Optional[int] , __A: List[Any] ) -> float:
with self.args.strategy.scope():
try:
if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
logger.info('''Do inference on TPU. Running model 5 times to stabilize compilation''' )
timeit.repeat(__A , repeat=1 , number=5 )
# as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
_A = timeit.repeat(
__A , repeat=self.args.repeat , number=10 , )
return min(__A ) / 10.0
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
def __A ( self: int , __A: Callable[[], None] ) -> [Memory, MemorySummary]:
logger.info(
'''Note that TensorFlow allocates more memory than '''
'''it might need to speed up computation. '''
'''The memory reported here corresponds to the memory '''
'''reported by `nvidia-smi`, which can vary depending '''
'''on total available memory on the GPU that is used.''' )
with self.args.strategy.scope():
try:
if self.args.trace_memory_line_by_line:
if not self.args.eager_mode:
raise ValueError(
'''`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'''
''' consumption line by line.''' )
_A = start_memory_tracing('''transformers''' )
if self.args.is_tpu:
# tpu
raise NotImplementedError(
'''Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'''
''' with `args.memory=False`''' )
elif self.args.is_gpu:
# gpu
if not is_pyanvml_available():
logger.warning(
'''py3nvml not installed, we won\'t log GPU memory usage. '''
'''Install py3nvml (pip install py3nvml) to log information about GPU.''' )
_A = '''N/A'''
else:
logger.info(
'''Measuring total GPU usage on GPU device. Make sure to not have additional processes'''
''' running on the same GPU.''' )
# init nvml
nvml.nvmlInit()
func()
_A = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
_A = nvml.nvmlDeviceGetMemoryInfo(__A )
_A = meminfo.used
_A = Memory(__A )
# shutdown nvml
nvml.nvmlShutdown()
else:
# cpu
if self.args.trace_memory_line_by_line:
logger.info(
'''When enabling line by line tracing, the max peak memory for CPU is inaccurate in'''
''' TensorFlow.''' )
_A = None
else:
_A = measure_peak_memory_cpu(__A )
_A = Memory(__A ) if isinstance(__A , __A ) else memory_bytes
if self.args.trace_memory_line_by_line:
_A = stop_memory_tracing(__A )
if memory is None:
_A = summary.total
else:
_A = None
return memory, summary
except ResourceExhaustedError as e:
self.print_fn(f"""Doesn't fit on GPU. {e}""" )
return "N/A", None
| 75 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( snake_case , unittest.TestCase ):
"""simple docstring"""
A_ = UnCLIPImageVariationPipeline
A_ = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
A_ = IMAGE_VARIATION_BATCH_PARAMS
A_ = [
"generator",
"return_dict",
"decoder_num_inference_steps",
"super_res_num_inference_steps",
]
A_ = False
@property
def __A ( self: Optional[Any] ) -> Optional[Any]:
return 32
@property
def __A ( self: List[str] ) -> Dict:
return 32
@property
def __A ( self: List[str] ) -> List[str]:
return self.time_input_dim
@property
def __A ( self: Union[str, Any] ) -> Optional[int]:
return self.time_input_dim * 4
@property
def __A ( self: List[Any] ) -> Any:
return 1_00
@property
def __A ( self: List[str] ) -> Union[str, Any]:
_A = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __A ( self: Optional[Any] ) -> Optional[Any]:
torch.manual_seed(0 )
_A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(__A )
@property
def __A ( self: List[str] ) -> int:
torch.manual_seed(0 )
_A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(__A )
@property
def __A ( self: str ) -> List[str]:
torch.manual_seed(0 )
_A = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
_A = UnCLIPTextProjModel(**__A )
return model
@property
def __A ( self: Tuple ) -> str:
torch.manual_seed(0 )
_A = {
'''sample_size''': 32,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
_A = UNetaDConditionModel(**__A )
return model
@property
def __A ( self: Tuple ) -> Any:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def __A ( self: List[Any] ) -> Any:
torch.manual_seed(0 )
_A = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def __A ( self: List[Any] ) -> Dict:
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
_A = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def __A ( self: List[str] ) -> str:
_A = self.dummy_decoder
_A = self.dummy_text_proj
_A = self.dummy_text_encoder
_A = self.dummy_tokenizer
_A = self.dummy_super_res_first
_A = self.dummy_super_res_last
_A = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , )
_A = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=10_00 , )
_A = CLIPImageProcessor(crop_size=32 , size=32 )
_A = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def __A ( self: Dict , __A: List[str] , __A: Any=0 , __A: Union[str, Any]=True ) -> Optional[Any]:
_A = floats_tensor((1, 3, 32, 32) , rng=random.Random(__A ) ).to(__A )
if str(__A ).startswith('''mps''' ):
_A = torch.manual_seed(__A )
else:
_A = torch.Generator(device=__A ).manual_seed(__A )
if pil_image:
_A = input_image * 0.5 + 0.5
_A = input_image.clamp(0 , 1 )
_A = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_A = DiffusionPipeline.numpy_to_pil(__A )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def __A ( self: List[str] ) -> Union[str, Any]:
_A = '''cpu'''
_A = self.get_dummy_components()
_A = self.pipeline_class(**__A )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_A = self.get_dummy_inputs(__A , pil_image=__A )
_A = pipe(**__A )
_A = output.images
_A = self.get_dummy_inputs(__A , pil_image=__A )
_A = pipe(
**__A , return_dict=__A , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array(
[
0.9_997,
0.0_002,
0.9_997,
0.9_997,
0.9_969,
0.0_023,
0.9_997,
0.9_969,
0.9_970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self: Optional[int] ) -> Tuple:
_A = '''cpu'''
_A = self.get_dummy_components()
_A = self.pipeline_class(**__A )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_A = self.get_dummy_inputs(__A , pil_image=__A )
_A = pipe(**__A )
_A = output.images
_A = self.get_dummy_inputs(__A , pil_image=__A )
_A = pipe(
**__A , return_dict=__A , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_A = np.array([0.9_997, 0.0_003, 0.9_997, 0.9_997, 0.9_970, 0.0_024, 0.9_997, 0.9_971, 0.9_971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self: Any ) -> Dict:
_A = '''cpu'''
_A = self.get_dummy_components()
_A = self.pipeline_class(**__A )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_A = self.get_dummy_inputs(__A , pil_image=__A )
_A = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
_A = pipe(**__A )
_A = output.images
_A = self.get_dummy_inputs(__A , pil_image=__A )
_A = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
_A = pipe(
**__A , return_dict=__A , )[0]
_A = image[0, -3:, -3:, -1]
_A = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_A = np.array(
[
0.9_997,
0.9_989,
0.0_008,
0.0_021,
0.9_960,
0.0_018,
0.0_014,
0.0_002,
0.9_933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def __A ( self: List[str] ) -> Tuple:
_A = torch.device('''cpu''' )
        class DummyScheduler:
            """simple docstring"""
            init_noise_sigma = 1
_A = self.get_dummy_components()
_A = self.pipeline_class(**__A )
_A = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
_A = torch.Generator(device=__A ).manual_seed(0 )
_A = pipe.decoder.dtype
_A = 1
_A = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_A = pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
_A = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_A = pipe.prepare_latents(
__A , dtype=__A , device=__A , generator=__A , latents=__A , scheduler=DummyScheduler() )
_A = self.get_dummy_inputs(__A , pil_image=__A )
_A = pipe(
**__A , decoder_latents=__A , super_res_latents=__A ).images
_A = self.get_dummy_inputs(__A , pil_image=__A )
# Don't pass image, instead pass embedding
_A = pipeline_inputs.pop('''image''' )
_A = pipe.image_encoder(__A ).image_embeds
_A = pipe(
**__A , decoder_latents=__A , super_res_latents=__A , image_embeddings=__A , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def __A ( self: Dict ) -> int:
_A = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
_A = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=__A , expected_max_diff=__A )
@skip_mps
def __A ( self: Any ) -> str:
_A = torch_device == '''cpu'''
_A = True
_A = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__A , relax_max_difference=__A , additional_params_copy_to_batched_inputs=__A , )
def __A ( self: Dict ) -> Dict:
_A = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_A = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__A , additional_params_copy_to_batched_inputs=__A , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__A )
@skip_mps
def __A ( self: Optional[int] ) -> Optional[Any]:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def __A ( self: Any ) -> Any:
return super().test_save_load_local()
@skip_mps
def __A ( self: Tuple ) -> Union[str, Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __A ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self: int ) -> List[str]:
_A = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
_A = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
_A = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
_A = pipeline.to(__A )
pipeline.set_progress_bar_config(disable=__A )
_A = torch.Generator(device='''cpu''' ).manual_seed(0 )
_A = pipeline(
__A , generator=__A , output_type='''np''' , )
_A = output.images[0]
assert image.shape == (2_56, 2_56, 3)
assert_mean_pixel_difference(__A , __A , 15 )
| 75 | 1 |
import numpy as np
def exponential_linear_unit( vector , alpha ):
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
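# Hedged usage sketch (the function name above is an assumption; the snippet's
# original name was mangled): positive inputs pass through unchanged, negative
# inputs are squashed toward -alpha.
print(exponential_linear_unit(np.array([-2.0, 0.0, 3.0]) , alpha=1.0 ))
# expected: [-0.86466472  0.          3.        ]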
| 278 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab and merges file, and thus also resulting in a larger model due to the large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a model ~50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will then be used as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : List[str] = "facebook/wmt19-en-de"
snake_case : Dict = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : List[str] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
snake_case : int = FSMTForConditionalGeneration(config)
print(F"num of params {tiny_model.num_parameters()}")
# Test
snake_case : Optional[Any] = tokenizer(["Making tiny model"], return_tensors="pt")
snake_case : List[str] = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
snake_case : Dict = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-de
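# Hedged follow-up (not part of the original script): reload the tiny checkpoint
# from the local folder saved above to confirm the save round-trips.
reloaded_model = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
print("reload test: num of params", reloaded_model.num_parameters())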
| 281 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass(frozen=__lowerCamelCase)
class lowerCamelCase__:
UpperCAmelCase__ : str
UpperCAmelCase__ : str
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
UpperCAmelCase__ : Optional[str] = None
@dataclass(frozen=__lowerCamelCase)
class lowerCamelCase__:
UpperCAmelCase__ : List[int]
UpperCAmelCase__ : Optional[List[int]] = None
UpperCAmelCase__ : Optional[List[int]] = None
UpperCAmelCase__ : Optional[Union[int, float]] = None
UpperCAmelCase__ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : List[InputFeatures]
def __init__( self: Tuple , UpperCamelCase_: str , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: str , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[Any]=False , UpperCamelCase_: bool = False , ):
__lowerCamelCase = hans_processors[task]()
__lowerCamelCase = os.path.join(
UpperCamelCase_ , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(UpperCamelCase_ ) , UpperCamelCase_ , ) , )
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__lowerCamelCase = cached_features_file + """.lock"""
with FileLock(UpperCamelCase_ ):
if os.path.exists(UpperCamelCase_ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
__lowerCamelCase = torch.load(UpperCamelCase_ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
__lowerCamelCase = (
processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
)
logger.info("""Training examples: %s""" , len(UpperCamelCase_ ) )
__lowerCamelCase = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
logger.info("""Saving features into cached file %s""" , UpperCamelCase_ )
torch.save(self.features , UpperCamelCase_ )
def __len__( self: Optional[int] ):
return len(self.features )
def __getitem__( self: Any , UpperCamelCase_: Optional[int] ):
return self.features[i]
def lowerCAmelCase__ ( self: Dict ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCamelCase__:
UpperCAmelCase__ : List[InputFeatures]
def __init__( self: Dict , UpperCamelCase_: str , UpperCamelCase_: PreTrainedTokenizer , UpperCamelCase_: str , UpperCamelCase_: Optional[int] = 1_28 , UpperCamelCase_: Dict=False , UpperCamelCase_: bool = False , ):
__lowerCamelCase = hans_processors[task]()
label_list = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
__lowerCamelCase = processor.get_dev_examples(UpperCamelCase_ ) if evaluate else processor.get_train_examples(UpperCamelCase_ )
__lowerCamelCase = hans_convert_examples_to_features(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(UpperCamelCase_ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__lowerCamelCase = tf.data.Dataset.from_generator(
UpperCamelCase_ , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def lowerCAmelCase__ ( self: Optional[int] ):
return self.dataset
def __len__( self: Tuple ):
return len(self.features )
def __getitem__( self: Dict , UpperCamelCase_: List[Any] ):
return self.features[i]
def lowerCAmelCase__ ( self: Dict ):
return self.label_list
class lowerCamelCase__( __lowerCamelCase):
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] ):
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , """heuristics_train_set.txt""" ) ) , """train""" )
def lowerCAmelCase__ ( self: Union[str, Any] , UpperCamelCase_: int ):
return self._create_examples(self._read_tsv(os.path.join(UpperCamelCase_ , """heuristics_evaluation_set.txt""" ) ) , """dev""" )
def lowerCAmelCase__ ( self: List[Any] ):
return ["contradiction", "entailment", "neutral"]
def lowerCAmelCase__ ( self: Optional[int] , UpperCamelCase_: Any , UpperCamelCase_: List[Any] ):
__lowerCamelCase = []
for i, line in enumerate(UpperCamelCase_ ):
if i == 0:
continue
__lowerCamelCase = """%s-%s""" % (set_type, line[0])
__lowerCamelCase = line[5]
__lowerCamelCase = line[6]
__lowerCamelCase = line[7][2:] if line[7].startswith("""ex""" ) else line[7]
__lowerCamelCase = line[0]
examples.append(InputExample(guid=UpperCamelCase_ , text_a=UpperCamelCase_ , text_b=UpperCamelCase_ , label=UpperCamelCase_ , pairID=UpperCamelCase_ ) )
return examples
def lowerCamelCase__ ( A__ : List[InputExample] , A__ : List[str] , A__ : int , A__ : PreTrainedTokenizer , ):
'''simple docstring'''
__lowerCamelCase = {label: i for i, label in enumerate(A__ )}
__lowerCamelCase = []
for ex_index, example in tqdm.tqdm(enumerate(A__ ) , desc="""convert examples to features""" ):
if ex_index % 10000 == 0:
logger.info("""Writing example %d""" % (ex_index) )
__lowerCamelCase = tokenizer(
example.text_a , example.text_b , add_special_tokens=A__ , max_length=A__ , padding="""max_length""" , truncation=A__ , return_overflowing_tokens=A__ , )
__lowerCamelCase = label_map[example.label] if example.label in label_map else 0
__lowerCamelCase = int(example.pairID )
features.append(InputFeatures(**A__ , label=A__ , pairID=A__ ) )
for i, example in enumerate(examples[:5] ):
logger.info("""*** Example ***""" )
logger.info(f'guid: {example}' )
logger.info(f'features: {features[i]}' )
return features
UpperCAmelCase_ = {
'hans': 3,
}
UpperCAmelCase_ = {
'hans': HansProcessor,
}
| 29 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar ( num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
'''simple docstring'''
if alpha_transform_type == "cosine":
def alpha_bar_fn(t ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(t ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
betas = []
for i in range(num_diffusion_timesteps ):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
return torch.tensor(betas , dtype=torch.float32 )
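# Hedged sanity check of betas_for_alpha_bar, added for illustration: the cosine
# transform yields strictly positive betas clipped at max_beta.
_betas = betas_for_alpha_bar(10 )
assert _betas.shape == (10,) and (_betas > 0).all() and (_betas <= 0.999).all()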
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase):
UpperCAmelCase__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
UpperCAmelCase__ : Any = 2
@register_to_config
def __init__( self: List[str] , UpperCamelCase_: int = 10_00 , UpperCamelCase_: float = 0.0_0085 , UpperCamelCase_: float = 0.012 , UpperCamelCase_: str = "linear" , UpperCamelCase_: Optional[Union[np.ndarray, List[float]]] = None , UpperCamelCase_: str = "epsilon" , UpperCamelCase_: str = "linspace" , UpperCamelCase_: int = 0 , ):
if trained_betas is not None:
__lowerCamelCase = torch.tensor(UpperCamelCase_ , dtype=torch.float32 )
elif beta_schedule == "linear":
__lowerCamelCase = torch.linspace(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dtype=torch.float32 )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCamelCase = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCamelCase_ , dtype=torch.float32 ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCamelCase = betas_for_alpha_bar(UpperCamelCase_ )
else:
raise NotImplementedError(F'{beta_schedule} is not implemented for {self.__class__}' )
__lowerCamelCase = 1.0 - self.betas
__lowerCamelCase = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int , UpperCamelCase_: List[str] , UpperCamelCase_: Optional[Any]=None ):
if schedule_timesteps is None:
__lowerCamelCase = self.timesteps
__lowerCamelCase = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__lowerCamelCase = 1 if len(UpperCamelCase_ ) > 1 else 0
else:
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
__lowerCamelCase = self._index_counter[timestep_int]
return indices[pos].item()
@property
def lowerCAmelCase__ ( self: Optional[int] ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: Union[float, torch.FloatTensor] , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
else:
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = sample / ((sigma**2 + 1) ** 0.5)
return sample
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: int , UpperCamelCase_: Union[str, torch.device] = None , UpperCamelCase_: Optional[int] = None , ):
__lowerCamelCase = num_inference_steps
__lowerCamelCase = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__lowerCamelCase = np.linspace(0 , num_train_timesteps - 1 , UpperCamelCase_ , dtype=UpperCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__lowerCamelCase = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(0 , UpperCamelCase_ ) * step_ratio).round()[::-1].copy().astype(UpperCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__lowerCamelCase = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__lowerCamelCase = (np.arange(UpperCamelCase_ , 0 , -step_ratio )).round().copy().astype(UpperCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
__lowerCamelCase = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__lowerCamelCase = torch.from_numpy(np.log(UpperCamelCase_ ) ).to(UpperCamelCase_ )
__lowerCamelCase = np.interp(UpperCamelCase_ , np.arange(0 , len(UpperCamelCase_ ) ) , UpperCamelCase_ )
__lowerCamelCase = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ )
# interpolate sigmas
__lowerCamelCase = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__lowerCamelCase = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__lowerCamelCase = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(UpperCamelCase_ ).startswith("""mps""" ):
# mps does not support float64
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=torch.float32 )
else:
__lowerCamelCase = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
# interpolate timesteps
__lowerCamelCase = self.sigma_to_t(UpperCamelCase_ ).to(UpperCamelCase_ , dtype=timesteps.dtype )
__lowerCamelCase = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__lowerCamelCase = torch.cat([timesteps[:1], interleaved_timesteps] )
__lowerCamelCase = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__lowerCamelCase = defaultdict(UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
# get log sigma
__lowerCamelCase = sigma.log()
# get distribution
__lowerCamelCase = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__lowerCamelCase = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__lowerCamelCase = low_idx + 1
__lowerCamelCase = self.log_sigmas[low_idx]
__lowerCamelCase = self.log_sigmas[high_idx]
# interpolate sigmas
__lowerCamelCase = (low - log_sigma) / (low - high)
__lowerCamelCase = w.clamp(0 , 1 )
# transform interpolation to time range
__lowerCamelCase = (1 - w) * low_idx + w * high_idx
__lowerCamelCase = t.view(sigma.shape )
return t
@property
def lowerCAmelCase__ ( self: Dict ):
return self.sample is None
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: Union[float, torch.FloatTensor] , UpperCamelCase_: Union[torch.FloatTensor, np.ndarray] , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self.index_for_timestep(UpperCamelCase_ )
# advance index counter by 1
__lowerCamelCase = timestep.cpu().item() if torch.is_tensor(UpperCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__lowerCamelCase = self.sigmas[step_index]
__lowerCamelCase = self.sigmas_interpol[step_index + 1]
__lowerCamelCase = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__lowerCamelCase = self.sigmas[step_index - 1]
__lowerCamelCase = self.sigmas_interpol[step_index]
__lowerCamelCase = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__lowerCamelCase = 0
__lowerCamelCase = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__lowerCamelCase = sigma_hat if self.state_in_first_order else sigma_interpol
__lowerCamelCase = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("""prediction_type not implemented yet: sample""" )
else:
raise ValueError(
F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__lowerCamelCase = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__lowerCamelCase = sigma_interpol - sigma_hat
# store for 2nd order step
__lowerCamelCase = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__lowerCamelCase = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__lowerCamelCase = sigma_next - sigma_hat
__lowerCamelCase = self.sample
__lowerCamelCase = None
__lowerCamelCase = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__lowerCamelCase = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCamelCase_ ):
# mps does not support float64
__lowerCamelCase = self.timesteps.to(original_samples.device , dtype=torch.float32 )
__lowerCamelCase = timesteps.to(original_samples.device , dtype=torch.float32 )
else:
__lowerCamelCase = self.timesteps.to(original_samples.device )
__lowerCamelCase = timesteps.to(original_samples.device )
__lowerCamelCase = [self.index_for_timestep(UpperCamelCase_ , UpperCamelCase_ ) for t in timesteps]
__lowerCamelCase = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__lowerCamelCase = sigma.unsqueeze(-1 )
__lowerCamelCase = original_samples + noise * sigma
return noisy_samples
def __len__( self: Tuple ):
return self.config.num_train_timesteps
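# Hedged usage sketch: the class above appears to match diffusers'
# KDPM2DiscreteScheduler (an assumption based on its interpolated-sigma logic).
# A typical denoising loop with that public class looks like this; the zeroed
# noise prediction is a stand-in for a real UNet call.
import torch as _torch
from diffusers import KDPM2DiscreteScheduler
_sched = KDPM2DiscreteScheduler(num_train_timesteps=1000 , beta_schedule="linear" )
_sched.set_timesteps(10 )
_sample = _torch.randn(1 , 4 , 8 , 8 ) * _sched.init_noise_sigma
for _t in _sched.timesteps:
    _model_input = _sched.scale_model_input(_sample , _t )
    _noise_pred = _torch.zeros_like(_model_input )  # stand-in for a UNet prediction
    _sample = _sched.step(_noise_pred , _t , _sample ).prev_sample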
| 29 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : List[Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
lowerCamelCase : Union[str, Any] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
lowerCamelCase : List[Any] = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
@lru_cache()
def bytes_to_unicode ( ) -> dict:
"""simple docstring"""
bs = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
cs = bs[:]
n = 0
for b in range(2**8 ):
if b not in bs:
bs.append(b )
cs.append(2**8 + n )
n += 1
cs = [chr(n ) for n in cs]
return dict(zip(bs , cs ) )
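# Hedged spot-check, added for illustration: the byte-to-unicode table is a
# bijection over all 256 bytes, and printable ASCII maps to itself.
_byte_encoder = bytes_to_unicode()
assert len(_byte_encoder ) == 256 and _byte_encoder[ord('A' )] == 'A'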
def get_pairs ( word ) -> set:
"""simple docstring"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
prev_char = char
return pairs
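# Hedged worked example, added for illustration: get_pairs returns the adjacent
# symbol pairs that BPE considers for merging.
assert get_pairs(('h', 'e', 'l', 'l', 'o') ) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}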
class A__ ( PreTrainedTokenizer ):
A__ = VOCAB_FILES_NAMES
A__ = PRETRAINED_VOCAB_FILES_MAP
A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ = ['input_ids', 'attention_mask']
def __init__( self : str , _a : Optional[int] , _a : Optional[int] , _a : List[str]="replace" , _a : List[str]="<s>" , _a : int="</s>" , _a : List[Any]="</s>" , _a : Optional[Any]="<s>" , _a : Any="<unk>" , _a : Optional[int]="<pad>" , _a : int="<mask>" , _a : List[str]=False , **_a : Tuple , ) -> Any:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else unk_token
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_SCREAMING_SNAKE_CASE =AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
errors=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , **_a , )
with open(_a , encoding='utf-8' ) as vocab_handle:
_SCREAMING_SNAKE_CASE =json.load(_a )
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.encoder.items()}
_SCREAMING_SNAKE_CASE =errors # how to handle errors in decoding
_SCREAMING_SNAKE_CASE =bytes_to_unicode()
_SCREAMING_SNAKE_CASE ={v: k for k, v in self.byte_encoder.items()}
with open(_a , encoding='utf-8' ) as merges_handle:
_SCREAMING_SNAKE_CASE =merges_handle.read().split('\n' )[1:-1]
_SCREAMING_SNAKE_CASE =[tuple(merge.split() ) for merge in bpe_merges]
_SCREAMING_SNAKE_CASE =dict(zip(_a , range(len(_a ) ) ) )
_SCREAMING_SNAKE_CASE ={}
_SCREAMING_SNAKE_CASE =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_SCREAMING_SNAKE_CASE =re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def A ( self : Any ) -> str:
'''simple docstring'''
return len(self.encoder )
def A ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A ( self : Union[str, Any] , _a : Tuple ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_SCREAMING_SNAKE_CASE =tuple(_a )
_SCREAMING_SNAKE_CASE =get_pairs(_a )
if not pairs:
return token
while True:
_SCREAMING_SNAKE_CASE =min(_a , key=lambda _a : self.bpe_ranks.get(_a , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =bigram
_SCREAMING_SNAKE_CASE =[]
_SCREAMING_SNAKE_CASE =0
while i < len(_a ):
try:
_SCREAMING_SNAKE_CASE =word.index(_a , _a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_SCREAMING_SNAKE_CASE =j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_SCREAMING_SNAKE_CASE =tuple(_a )
_SCREAMING_SNAKE_CASE =new_word
if len(_a ) == 1:
break
else:
_SCREAMING_SNAKE_CASE =get_pairs(_a )
_SCREAMING_SNAKE_CASE =' '.join(_a )
_SCREAMING_SNAKE_CASE =word
return word
def A ( self : Optional[Any] , _a : int ) -> Tuple:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =[]
for token in re.findall(self.pat , _a ):
_SCREAMING_SNAKE_CASE =''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(' ' ) )
return bpe_tokens
def A ( self : Dict , _a : List[str] ) -> int:
'''simple docstring'''
return self.encoder.get(_a , self.encoder.get(self.unk_token ) )
def A ( self : Dict , _a : List[str] ) -> Tuple:
'''simple docstring'''
return self.decoder.get(_a )
def A ( self : int , _a : Any ) -> List[str]:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =''.join(_a )
_SCREAMING_SNAKE_CASE =bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def A ( self : List[Any] , _a : str , _a : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
_SCREAMING_SNAKE_CASE =os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_SCREAMING_SNAKE_CASE =os.path.join(
_a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_a , ensure_ascii=_a ) + '\n' )
_SCREAMING_SNAKE_CASE =0
with open(_a , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
_SCREAMING_SNAKE_CASE =token_index
writer.write(' '.join(_a ) + '\n' )
index += 1
return vocab_file, merge_file
def A ( self : Tuple , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def A ( self : Optional[int] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0 )) + [1]
return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]
def A ( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def A ( self : List[Any] , _a : Tuple , _a : List[Any]=False , **_a : Optional[int] ) -> int:
'''simple docstring'''
_SCREAMING_SNAKE_CASE =kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
_SCREAMING_SNAKE_CASE =' ' + text
return (text, kwargs)
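# Hedged usage sketch: the class above mirrors the slow BART tokenizer; with
# the canonical checkpoint from the url maps above it behaves like a
# GPT-2-style byte-level BPE tokenizer (network access required).
from transformers import BartTokenizer
_tok = BartTokenizer.from_pretrained('facebook/bart-base' )
print(_tok.tokenize('Hello world' ) ) # expected: ['Hello', 'Ġworld']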
| 47 |
'''simple docstring'''
class A__ :
def __init__( self : Union[str, Any] , size : int ) -> None:
'''simple docstring'''
self.size = size
self.arr = [0] * size
self.tree = [0] * size
@staticmethod
def get_next( index : int ) -> int:
'''simple docstring'''
return index | (index + 1)
@staticmethod
def get_prev( index : int ) -> int:
'''simple docstring'''
return (index & (index + 1)) - 1
def update( self : Tuple , index : int , value : int ) -> None:
'''simple docstring'''
self.arr[index] = value
while index < self.size:
current_left_border = self.get_prev(index ) + 1
if current_left_border == index:
self.tree[index] = value
else:
# keep the running maximum over the node's range (assumes point values only grow)
self.tree[index] = max(self.tree[index] , value )
index = self.get_next(index )
def query( self : int , left : int , right : int ) -> int:
'''simple docstring'''
right -= 1 # because `right` is exclusive
result = 0
while left <= right:
current_left = self.get_prev(right )
if left <= current_left:
result = max(result , self.tree[right] )
right = current_left
else:
result = max(result , self.arr[right] )
right -= 1
return result
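# Hedged usage sketch (class name A__ kept from the snippet; the update/query
# method names above are assumptions, since the original names were mangled):
_tree = A__(8 )
_tree.update(2 , 5 )
_tree.update(5 , 9 )
assert _tree.query(0 , 8 ) == 9 and _tree.query(0 , 5 ) == 5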
if __name__ == "__main__":
import doctest
doctest.testmod()
| 47 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_a : List[Any]= logging.get_logger(__name__)
def get_yolos_config ( yolos_name : str ) -> YolosConfig:
'''simple docstring'''
config = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
config.hidden_size = 1_92
config.intermediate_size = 7_68
config.num_hidden_layers = 12
config.num_attention_heads = 3
config.image_size = [8_00, 13_33]
config.use_mid_position_embeddings = False
elif yolos_name == "yolos_s_dWr":
config.hidden_size = 3_30
config.num_hidden_layers = 14
config.num_attention_heads = 6
config.intermediate_size = 13_20
elif "yolos_s" in yolos_name:
config.hidden_size = 3_84
config.intermediate_size = 15_36
config.num_hidden_layers = 12
config.num_attention_heads = 6
elif "yolos_b" in yolos_name:
config.image_size = [8_00, 13_44]
config.num_labels = 91
repo_id = 'huggingface/label-files'
filename = 'coco-detection-id2label.json'
id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
id2label = {int(k ): v for k, v in id2label.items()}
config.id2label = id2label
config.label2id = {v: k for k, v in id2label.items()}
return config
def read_in_q_k_v ( state_dict : dict , config : YolosConfig , base_model : bool = False ) -> None:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F"blocks.{i}.attn.qkv.weight" )
in_proj_bias = state_dict.pop(F"blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
state_dict[F"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
state_dict[F"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
state_dict[F"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
state_dict[F"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key ( name : str ) -> str:
'''simple docstring'''
if "backbone" in name:
__snake_case : Dict = name.replace('backbone' , 'vit' )
if "cls_token" in name:
__snake_case : Any = name.replace('cls_token' , 'embeddings.cls_token' )
if "det_token" in name:
__snake_case : Optional[int] = name.replace('det_token' , 'embeddings.detection_tokens' )
if "mid_pos_embed" in name:
__snake_case : List[str] = name.replace('mid_pos_embed' , 'encoder.mid_position_embeddings' )
if "pos_embed" in name:
__snake_case : Any = name.replace('pos_embed' , 'embeddings.position_embeddings' )
if "patch_embed.proj" in name:
__snake_case : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "blocks" in name:
__snake_case : List[Any] = name.replace('blocks' , 'encoder.layer' )
if "attn.proj" in name:
__snake_case : Any = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__snake_case : Dict = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__snake_case : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__snake_case : Optional[Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__snake_case : List[str] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__snake_case : Optional[Any] = name.replace('mlp.fc2' , 'output.dense' )
if "class_embed" in name:
__snake_case : Optional[Any] = name.replace('class_embed' , 'class_labels_classifier' )
if "bbox_embed" in name:
__snake_case : int = name.replace('bbox_embed' , 'bbox_predictor' )
if "vit.norm" in name:
__snake_case : Any = name.replace('vit.norm' , 'vit.layernorm' )
return name
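# Hedged spot-check of the renaming rules above, added for illustration
# (the keys below are synthetic timm-style names):
assert rename_key('backbone.blocks.0.attn.proj.weight' ) == 'vit.encoder.layer.0.attention.output.dense.weight'
assert rename_key('cls_token' ) == 'embeddings.cls_token'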
def convert_state_dict ( orig_state_dict : dict , model : YolosForObjectDetection ) -> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
val = orig_state_dict.pop(key )
if "qkv" in key:
key_split = key.split('.' )
layer_num = int(key_split[2] )
dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
dim : dim * 2, :
]
orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
else:
orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
orig_state_dict[F"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
else:
orig_state_dict[rename_key(key )] = val
return orig_state_dict
def prepare_img ( ) -> torch.Tensor:
'''simple docstring'''
__snake_case : Union[str, Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__snake_case : List[Any] = Image.open(requests.get(UpperCAmelCase_ , stream=UpperCAmelCase_ ).raw )
return im
@torch.no_grad()
def convert_yolos_checkpoint ( yolos_name : str , checkpoint_path : str , pytorch_dump_folder_path : str , push_to_hub : bool = False ) -> None:
'''simple docstring'''
config = get_yolos_config(yolos_name )
# load original state_dict
state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
# load 🤗 model
model = YolosForObjectDetection(config )
model.eval()
new_state_dict = convert_state_dict(state_dict , model )
model.load_state_dict(new_state_dict )
# Check outputs on an image, prepared by YolosImageProcessor
size = 8_00 if yolos_name != 'yolos_ti' else 5_12
image_processor = YolosImageProcessor(format='coco_detection' , size=size )
encoding = image_processor(images=prepare_img() , return_tensors='pt' )
outputs = model(**encoding )
logits, pred_boxes = outputs.logits, outputs.pred_boxes
expected_slice_logits, expected_slice_boxes = None, None
if yolos_name == "yolos_ti":
__snake_case : Dict = torch.tensor(
[[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] )
__snake_case : Tuple = torch.tensor(
[[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] )
elif yolos_name == "yolos_s_200_pre":
__snake_case : Tuple = torch.tensor(
[[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] )
__snake_case : Tuple = torch.tensor(
[[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] )
elif yolos_name == "yolos_s_300_pre":
__snake_case : Optional[Any] = torch.tensor(
[[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] )
__snake_case : Tuple = torch.tensor(
[[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] )
elif yolos_name == "yolos_s_dWr":
__snake_case : str = torch.tensor(
[[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] )
__snake_case : Tuple = torch.tensor(
[[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] )
elif yolos_name == "yolos_base":
__snake_case : str = torch.tensor(
[[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] )
__snake_case : Dict = torch.tensor(
[[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] )
else:
raise ValueError(F"Unknown yolos_name: {yolos_name}" )
assert torch.allclose(logits[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
assert torch.allclose(pred_boxes[0, :3, :3] , UpperCAmelCase_ , atol=1E-4 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F"Saving model {yolos_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(pytorch_dump_folder_path )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
image_processor.save_pretrained(pytorch_dump_folder_path )
if push_to_hub:
model_mapping = {
'yolos_ti': 'yolos-tiny',
'yolos_s_200_pre': 'yolos-small',
'yolos_s_300_pre': 'yolos-small-300',
'yolos_s_dWr': 'yolos-small-dwr',
'yolos_base': 'yolos-base',
}
print('Pushing to the hub...' )
model_name = model_mapping[yolos_name]
image_processor.push_to_hub(model_name , organization='hustvl' )
model.push_to_hub(model_name , organization='hustvl' )
if __name__ == "__main__":
_a : Any= argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--yolos_name",
default="yolos_s_200_pre",
type=str,
help=(
"Name of the YOLOS model you'd like to convert. Should be one of 'yolos_ti', 'yolos_s_200_pre',"
" 'yolos_s_300_pre', 'yolos_s_dWr', 'yolos_base'."
),
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original state dict (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_a : str= parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
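# Hedged CLI sketch (the script filename and paths below are placeholders, not
# taken from the original snippet):
# python convert_yolos_to_pytorch.py --yolos_name yolos_s_200_pre \
#     --checkpoint_path /path/to/yolos_s_200_pre.pth --pytorch_dump_folder_path ./yolos-small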
| 356 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
_a : Any= logging.get_logger(__name__)
_a : str= {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
_a : Optional[Any]= [
"small",
"small-base",
"medium",
"medium-base",
"intermediate",
"intermediate-base",
"large",
"large-base",
"xlarge",
"xlarge-base",
]
_a : List[Any]= {
"vocab_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json",
"funnel-transformer/small-base": (
"https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"
),
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json",
"funnel-transformer/medium-base": (
"https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json",
"funnel-transformer/large-base": (
"https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"
),
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json",
"funnel-transformer/xlarge-base": (
"https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"
),
},
}
_a : str= {f'''funnel-transformer/{name}''': 512 for name in _model_names}
_a : List[Any]= {f'''funnel-transformer/{name}''': {"do_lower_case": True} for name in _model_names}
class UpperCamelCase ( lowercase ):
UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase : int = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase : Tuple = FunnelTokenizer
UpperCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase : int = 2
def __init__(self : int , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=True , _A : List[str]="<unk>" , _A : Any="<sep>" , _A : Dict="<pad>" , _A : Tuple="<cls>" , _A : Dict="<mask>" , _A : Optional[Any]="<s>" , _A : List[Any]="</s>" , _A : Optional[int]=True , _A : Dict=True , _A : Tuple=None , _A : int="##" , **_A : Any , ) -> str:
super().__init__(
_A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , bos_token=_A , eos_token=_A , clean_text=_A , tokenize_chinese_chars=_A , strip_accents=_A , wordpieces_prefix=_A , **_A , )
__snake_case : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get('lowercase' , _A) != do_lower_case
or normalizer_state.get('strip_accents' , _A) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _A) != tokenize_chinese_chars
):
__snake_case : List[str] = getattr(_A , normalizer_state.pop('type'))
__snake_case : int = do_lower_case
__snake_case : Optional[int] = strip_accents
__snake_case : str = tokenize_chinese_chars
__snake_case : Optional[int] = normalizer_class(**_A)
__snake_case : str = do_lower_case
def _lowercase (self : Optional[Any] , token_ids_0 , token_ids_1=None) -> Any:
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def _lowercase (self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def _lowercase (self : Tuple , _A : str , _A : Optional[str] = None) -> Tuple[str]:
__snake_case : int = self._tokenizer.model.save(_A , name=_A)
return tuple(_A)
| 95 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCamelCase = logging.get_logger(__name__)
__UpperCamelCase = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = "levit"
def __init__( self, lowerCAmelCase__=224, lowerCAmelCase__=3, lowerCAmelCase__=3, lowerCAmelCase__=2, lowerCAmelCase__=1, lowerCAmelCase__=16, lowerCAmelCase__=[128, 256, 384], lowerCAmelCase__=[4, 8, 12], lowerCAmelCase__=[4, 4, 4], lowerCAmelCase__=[16, 16, 16], lowerCAmelCase__=0, lowerCAmelCase__=[2, 2, 2], lowerCAmelCase__=[2, 2, 2], lowerCAmelCase__=0.02, **lowerCAmelCase__, ) -> Optional[Any]:
super().__init__(**lowerCAmelCase__)
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = kernel_size
snake_case_ = stride
snake_case_ = padding
snake_case_ = hidden_sizes
snake_case_ = num_attention_heads
snake_case_ = depths
snake_case_ = key_dim
snake_case_ = drop_path_rate
snake_case_ = patch_size
snake_case_ = attention_ratio
snake_case_ = mlp_ratio
snake_case_ = initializer_range
snake_case_ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class UpperCamelCase ( lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE_ = version.parse("1.11" )
@property
def a_ ( self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def a_ ( self) -> float:
return 1e-4
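# Hedged usage sketch: the two classes above mirror transformers' LevitConfig
# and its ONNX config; a quick config round-trip with the public class looks
# like this (class and argument names from transformers):
from transformers import LevitConfig
_cfg = LevitConfig(image_size=224 , hidden_sizes=[128, 256, 384] )
assert _cfg.hidden_sizes[0] == 128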
| 69 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
lowerCAmelCase : Any = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor ( shape , vocab_size , rng=None ):
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims ):
values.append(rng.randint(0 , vocab_size - 1 ) )
output = np.array(values , dtype=jnp.int32 ).reshape(shape )
return output
def random_attention_mask ( shape , rng=None ):
attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
# make sure that at least one token is attended to for each batch
attn_mask[:, -1] = 1
return attn_mask
@require_flax
class __lowercase :
"""simple docstring"""
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = ()
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[int] = inputs["input_ids"].shape[-1] // 2
SCREAMING_SNAKE_CASE_: List[str] = inputs["input_ids"][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE_: Any = jnp.ones_like(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE_: Optional[Any] = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE_: Optional[Any] = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[int] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Union[str, Any] = False
SCREAMING_SNAKE_CASE_: Dict = max_length
SCREAMING_SNAKE_CASE_: List[Any] = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE_: List[Any] = getattr(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = pt_model_class(lowerCAmelCase__).eval()
SCREAMING_SNAKE_CASE_: str = load_flax_weights_in_pytorch_model(lowerCAmelCase__ , flax_model.params)
SCREAMING_SNAKE_CASE_: List[Any] = flax_model.generate(lowerCAmelCase__).sequences
SCREAMING_SNAKE_CASE_: str = pt_model.generate(torch.tensor(lowerCAmelCase__ , dtype=torch.long))
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE_: List[Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Dict):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[int] = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Union[str, Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Optional[Any] = True
SCREAMING_SNAKE_CASE_: Dict = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = jit(model.generate)
SCREAMING_SNAKE_CASE_: Dict = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: int = False
SCREAMING_SNAKE_CASE_: Optional[int] = max_length
SCREAMING_SNAKE_CASE_: Optional[int] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = jit(model.generate)
SCREAMING_SNAKE_CASE_: Optional[int] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: str = False
SCREAMING_SNAKE_CASE_: int = max_length
SCREAMING_SNAKE_CASE_: str = 2
SCREAMING_SNAKE_CASE_: Optional[Any] = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: str = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences)
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Tuple = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Tuple = True
SCREAMING_SNAKE_CASE_: List[str] = max_length
SCREAMING_SNAKE_CASE_: Any = 0.8
SCREAMING_SNAKE_CASE_: Any = 10
SCREAMING_SNAKE_CASE_: List[str] = 0.3
SCREAMING_SNAKE_CASE_: Tuple = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: int = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[Any] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Dict = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE_: Any = max_length
SCREAMING_SNAKE_CASE_: List[str] = 2
SCREAMING_SNAKE_CASE_: str = 1
SCREAMING_SNAKE_CASE_: Tuple = 8
SCREAMING_SNAKE_CASE_: List[Any] = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = model.generate(lowerCAmelCase__).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = jit(model.generate)
SCREAMING_SNAKE_CASE_: List[str] = jit_generate(lowerCAmelCase__).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_greedy_generate_attn_mask( self ):  # name assumed
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = False
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_sample_generate_attn_mask( self ):  # name assumed
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.do_sample = True
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
    def test_beam_search_generate_attn_mask( self ):  # name assumed
        config , input_ids , attention_mask , max_length = self._get_input_ids_and_config()
        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)
        config.num_beams = 2
        config.max_length = max_length
        for model_class in self.all_generative_model_classes:
            model = model_class(config)
            generation_outputs = model.generate(input_ids , attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1] , max_length)
            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids , attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase ):  # class name assumed
    """simple docstring"""
    def test_validate_generation_inputs( self ):  # name assumed
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str , return_tensors="np").input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError , "do_samples"):
            model.generate(input_ids , do_samples=True)
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError , "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids , **fake_model_kwargs)
| 13 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
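# full determinism keeps the pixel-level slice assertions below reproducible across runs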
class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase):  # class name assumed
    pipeline_class = KandinskyV22Img2ImgPipeline
    params = ['''image_embeds''', '''negative_image_embeds''', '''image''']
    batch_params = [
        '''image_embeds''',
        '''negative_image_embeds''',
        '''image''',
    ]
    required_optional_params = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''num_images_per_prompt''',
        '''output_type''',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        """simple docstring"""
        return 32
    @property
    def time_input_dim( self ):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_a( self ):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim( self ):  # name assumed; not referenced elsewhere in this file
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        """simple docstring"""
        return 1_00
    @property
    def dummy_unet( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model_kwargs = {
            'in_channels': 4,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        model = UNet2DConditionModel(**model_kwargs )
        return model
    @property
    def dummy_movq_kwargs( self ):
        """simple docstring"""
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
    @property
    def dummy_movq( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
        return model
    def get_dummy_components( self ):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
            'num_train_timesteps': 10_00,
            'beta_schedule': 'linear',
            'beta_start': 0.0_00_85,
            'beta_end': 0.0_12,
            'clip_sample': False,
            'set_alpha_to_one': False,
            'steps_offset': 0,
            'prediction_type': 'epsilon',
            'thresholding': False,
        }
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((2_56, 2_56) )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'image': init_image,
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'generator': generator,
            'height': 64,
            'width': 64,
            'num_inference_steps': 10,
            'guidance_scale': 7.0,
            'strength': 0.2,
            'output_type': 'np',
        }
        return inputs
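    # Fast CPU check: run the pipeline twice (dataclass output vs. tuple output)
    # and compare a 3x3 corner slice of the image against reference values.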
    def test_kandinsky_img2img( self ):  # name assumed
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6_19_97_78, 0.63_98_44_06, 0.46_14_57_85, 0.62_94_49_84, 0.5_62_22_15, 0.47_30_61_32, 0.47_44_14_56, 0.4_60_76_06, 0.48_71_92_63] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase):  # class name assumed
    def tearDown( self ):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):  # name assumed
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_img2img_frog.npy' )
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
        prompt = 'A red cartoon frog, 4k'
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22Img2ImgPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='cpu' ).manual_seed(0 )
        image_embeds , negative_image_embeds = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_embeds , negative_image_embeds=negative_image_embeds , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (7_68, 7_68, 3)
        assert_mean_pixel_difference(image , expected_image )
| 122 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):  # class name assumed
    def tearDown( self ):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        """simple docstring"""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image
@property
    def dummy_cond_unet_upscale( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64) ,
            layers_per_block=2 ,
            sample_size=32 ,
            in_channels=7 ,
            out_channels=4 ,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') ,
            up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') ,
            cross_attention_dim=32 ,
            attention_head_dim=8 ,
            use_linear_projection=True ,  # value assumed; the original reference was elided
            only_cross_attention=(True, True, False) ,
            num_class_embeds=1_00 , )
        return model
@property
    def dummy_vae( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64] ,
            in_channels=3 ,
            out_channels=3 ,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] ,
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] ,
            latent_channels=4 , )
        return model
@property
    def dummy_text_encoder( self ):
        """simple docstring"""
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 ,
            eos_token_id=2 ,
            hidden_size=32 ,
            intermediate_size=37 ,
            layer_norm_eps=1E-05 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            pad_token_id=1 ,
            vocab_size=10_00 ,
            hidden_act='gelu' ,
            projection_dim=5_12 , )
        return CLIPTextModel(config )
    def test_stable_diffusion_upscale( self ):  # name assumed
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=3_50 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        generator = torch.Generator(device=device ).manual_seed(0 )
        image_from_tuple = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
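    # Batching behavior: both a list of prompts and num_images_per_prompt must
    # produce a batched output of the expected size.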
    def test_stable_diffusion_upscale_batch( self ):  # name assumed
        """simple docstring"""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=3_50 , )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        output = sd_pipe(
            2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        assert image.shape[0] == 2
        generator = torch.Generator(device=device ).manual_seed(0 )
        output = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
    def test_stable_diffusion_upscale_fp16( self ):  # name assumed
        """simple docstring"""
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type='v_prediction' )
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        image = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        low_res_image = Image.fromarray(np.uint8(image ) ).convert('RGB' ).resize((64, 64) )
        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , max_noise_level=3_50 , )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        prompt = 'A painting of a squirrel eating a burger'
        generator = torch.manual_seed(0 )
        image = sd_pipe(
            [prompt] , image=low_res_image , generator=generator , num_inference_steps=2 , output_type='np' , ).images
        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):  # class name assumed
    def tearDown( self ):
        """simple docstring"""
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline( self ):  # name assumed
        """simple docstring"""
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat.npy' )
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 1E-3
    def test_stable_diffusion_upscale_pipeline_fp16( self ):  # name assumed
        """simple docstring"""
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
            '/upsampled_cat_fp16.npy' )
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt , image=image , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading( self ):  # name assumed
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/sd2-upscale/low_res_cat.png' )
        model_id = 'stabilityai/stable-diffusion-x4-upscaler'
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id , torch_dtype=torch.float16 , )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()
        prompt = 'a cat sitting on a park bench'
        generator = torch.manual_seed(0 )
        _ = pipe(
            prompt=prompt , image=image , generator=generator , num_inference_steps=5 , output_type='np' , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
| 122 | 1 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):  # class name assumed
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        '''simple docstring'''
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , '''w''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
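    # Round-trip check: with the toy merges above, "lower" tokenizes to
    # ["low", "er</w>"], and ids are looked up in the toy vocab.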
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = XLMTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
@slow
    def test_sequence_builders( self ):
        '''simple docstring'''
        tokenizer = XLMTokenizer.from_pretrained('''xlm-mlm-en-2048''' )
        text = tokenizer.encode('''sequence builders''' , add_special_tokens=False )
        text_a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=False )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 273 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig ):
    model_type = '''longformer'''

    def __init__( self , attention_window = 5_1_2 , sep_token_id = 2 , pad_token_id = 1 , bos_token_id = 0 , eos_token_id = 2 , vocab_size = 3_0_5_2_2 , hidden_size = 7_6_8 , num_hidden_layers = 1_2 , num_attention_heads = 1_2 , intermediate_size = 3_0_7_2 , hidden_act = "gelu" , hidden_dropout_prob = 0.1 , attention_probs_dropout_prob = 0.1 , max_position_embeddings = 5_1_2 , type_vocab_size = 2 , initializer_range = 0.02 , layer_norm_eps = 1E-12 , onnx_export = False , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig ):
    def __init__( self , config , task = "default" , patching_specs = None ):
        '''simple docstring'''
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''global_attention_mask''', dynamic_axis),
            ] )
@property
    def outputs( self ):
        '''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs['''pooler_output'''] = {0: '''batch'''}  # output name assumed
        return outputs
@property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
@property
    def default_onnx_opset( self ):
        '''simple docstring'''
        return max(super().default_onnx_opset , 1_4 )
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs['''global_attention_mask'''] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs['''global_attention_mask'''][:, ::2] = 1
        return inputs
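# Example (sketch; variable names are illustrative):
#   config = LongformerConfig()
#   onnx_config = LongformerOnnxConfig(config, task="default")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)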
| 273 | 1 |
from statistics import mean
import numpy as np
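# Highest Response Ratio Next (HRRN) scheduling, non-preemptive: among the
# processes that have arrived, run the one with the largest response ratio,
#   response ratio = (waiting time + burst time) / burst time
# which favors short jobs while also aging long-waiting ones, avoiding starvation.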
def calculate_turn_around_time( process_name , arrival_time , burst_time , no_of_process ) -> list:
    '''simple docstring'''
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Tracks which processes have finished: 1 means finished, 0 means not yet run to completion.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process
    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time )]
    process_name = [process_name[i] for i in np.argsort(arrival_time )]
    arrival_time.sort()
    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]
        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0 , no_of_process ):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
                if response_ratio < temp:
                    response_ratio = temp
                    loc = i
        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1
    return turn_around_time
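# Waiting time follows directly from the turn-around time:
#   waiting time = turn-around time - burst time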
def calculate_waiting_time( process_name , turn_around_time , burst_time , no_of_process ) -> list:
    '''simple docstring'''
    waiting_time = [0] * no_of_process
    for i in range(0 , no_of_process ):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]
    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 110 |
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
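# Boundary checks: each activation must vanish for large negative inputs, be
# nonzero just below zero, pass through the origin, and be ~identity for large
# positive inputs (float32 dtype below is assumed).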
class ActivationsTests(unittest.TestCase ):  # class name assumed
    def test_swish( self ):
        """simple docstring"""
        act = get_activation('swish' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_silu( self ):
        """simple docstring"""
        act = get_activation('silu' )
        self.assertIsInstance(act , nn.SiLU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_mish( self ):
        """simple docstring"""
        act = get_activation('mish' )
        self.assertIsInstance(act , nn.Mish )
        self.assertEqual(act(torch.tensor(-200 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
    def test_gelu( self ):
        """simple docstring"""
        act = get_activation('gelu' )
        self.assertIsInstance(act , nn.GELU )
        self.assertEqual(act(torch.tensor(-100 , dtype=torch.float32 ) ).item() , 0 )
        self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(0 , dtype=torch.float32 ) ).item() , 0 )
        self.assertEqual(act(torch.tensor(20 , dtype=torch.float32 ) ).item() , 20 )
| 110 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
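# Lazy-import shim: the structure below is only resolved when an attribute is
# first accessed, so importing this package does not pull in torch.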
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 333 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
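# Each *_MAPPING_NAMES table below maps a model-type string to the name of the
# corresponding Flax class; _LazyAutoMapping resolves the class object lazily,
# so importing this module stays cheap.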
logger = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_MAPPING
FlaxAutoModel = auto_class_update(FlaxAutoModel)
class FlaxAutoModelForPreTraining(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class FlaxAutoModelForCausalLM(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class FlaxAutoModelForMaskedLM(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class FlaxAutoModelForTokenClassification(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class FlaxAutoModelForImageClassification(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='image classification'
)
class FlaxAutoModelForVision2Seq(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='vision-to-text modeling')
class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass ):
    '''simple docstring'''
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 333 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    '''simple docstring'''
    def __init__( self , parent , batch_size=2 , is_training=True , use_auxiliary_loss=False , num_queries=1_0 , num_channels=3 , min_size=3_2 * 8 , max_size=3_2 * 8 , num_labels=4 , hidden_dim=6_4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim  # attribute name assumed
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] , device=torch_device )
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) , device=torch_device ) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config( self ):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim , )
        # config attribute names below are reconstructed from the upstream tester; treat them as assumptions
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 6_4
        config.dim_feedforward = 1_2_8
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self , output , config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(pixel_decoder_hidden_states ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(transformer_decoder_hidden_states ) , config.decoder_layers )
    def create_and_check_maskaformer_model( self , config , pixel_values , pixel_mask , output_hidden_states=False ):
        with torch.no_grad():
            model = Mask2FormerModel(config=config )
            model.to(torch_device )
            model.eval()
            output = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            output = model(pixel_values , output_hidden_states=True )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(output , config )
    def create_and_check_maskaformer_instance_segmentation_head_model( self , config , pixel_values , pixel_mask , mask_labels , class_labels ):
        model = Mask2FormerForUniversalSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        def comm_check_on_output(result ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
        with torch.no_grad():
            result = model(pixel_values=pixel_values , pixel_mask=pixel_mask )
            result = model(pixel_values )
            comm_check_on_output(result )
            result = model(
                pixel_values=pixel_values , pixel_mask=pixel_mask , mask_labels=mask_labels , class_labels=class_labels )
            comm_check_on_output(result )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class Mask2FormerModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):  # class name assumed
    '''simple docstring'''
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': Mask2FormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp( self ):
        self.model_tester = Mask2FormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=Mask2FormerConfig , has_text_modality=False )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_maskaformer_model( self ):  # name assumed
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=False )
    def test_maskaformer_instance_segmentation_head_model( self ):  # name assumed
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs )
    # skipped-test names below are assumed from the common tester API
    @unittest.skip(reason='Mask2Former does not use inputs_embeds' )
    def test_inputs_embeds( self ):
        pass
    @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
    def test_model_common_attributes( self ):
        pass
    @unittest.skip(reason='Mask2Former is not a generative model' )
    def test_generate_without_input_ids( self ):
        pass
    @unittest.skip(reason='Mask2Former does not use token embeddings' )
    def test_resize_tokens_embeddings( self ):
        pass
    @require_torch_multi_gpu
    @unittest.skip(
        reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def test_multi_gpu_data_parallel_forward( self ):
        pass
    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def test_model_is_small( self ):
        pass
    def test_forward_signature( self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@slow
    def test_model_from_pretrained( self ):  # name assumed
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_model_with_labels( self ):  # name assumed
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size) , device=torch_device ),
            'mask_labels': torch.randn((2, 1_0, *size) , device=torch_device ),
            'class_labels': torch.zeros(2 , 1_0 , device=torch_device ).long(),
        }
        config = self.model_tester.get_config()
        model = Mask2FormerForUniversalSegmentation(config ).to(torch_device )
        outputs = model(**inputs )
        self.assertTrue(outputs.loss is not None )
    def test_hidden_states_output( self ):  # name assumed
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config , **inputs_dict , output_hidden_states=True )
    def test_attention_outputs( self ):  # name assumed
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config ).to(torch_device )
            outputs = model(**inputs_dict , output_attentions=True )
            self.assertTrue(outputs.attentions is not None )
    def test_training( self ):
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config )
        model.to(torch_device )
        model.train()
        loss = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels ).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions( self ):  # name assumed
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config ).to(torch_device )
        model.train()
        outputs = model(pixel_values , mask_labels=mask_labels , class_labels=class_labels )
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
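# absolute tolerance for comparing model outputs against the reference slices below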
TOLERANCE = 1e-4
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase ):  # class name assumed
    '''simple docstring'''
    @cached_property
    def model_checkpoints( self ):
        return "facebook/mask2former-swin-small-coco-instance"
    @cached_property
    def default_image_processor( self ):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
    def test_inference_no_head( self ):  # name assumed
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_8_4, 3_8_4) )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_slice = torch.tensor(
            [[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        expected_slice = torch.tensor(
            [[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        expected_slice = torch.tensor(
            [[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(torch_device )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_universal_segmentation_head( self ):  # name assumed
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(torch_device ).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image , return_tensors='pt' ).to(torch_device )
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 )
        # check size
        self.assertEqual(inputs_shape , (1, 3, 3_8_4, 3_8_4) )
        with torch.no_grad():
            outputs = model(**inputs )
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        expected_slice = [
            [-8.78_39, -9.00_56, -8.81_21],
            [-7.41_04, -7.03_13, -6.54_01],
            [-6.61_05, -6.34_27, -6.46_75],
        ]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , expected_slice , atol=TOLERANCE ) )
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        expected_slice = torch.tensor(
            [
                [1.83_24, -8.08_35, -4.19_22],
                [0.84_50, -9.00_50, -3.60_53],
                [0.30_45, -7.72_93, -3.02_75],
            ] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , expected_slice , atol=TOLERANCE ) )
def _UpperCamelCase ( self ):
UpperCAmelCase_ : Optional[int] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(snake_case_ ).eval()
UpperCAmelCase_ : Optional[int] = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors='pt' , )
UpperCAmelCase_ : Union[str, Any] = inputs['pixel_values'].to(snake_case_ )
UpperCAmelCase_ : List[str] = [el.to(snake_case_ ) for el in inputs['mask_labels']]
UpperCAmelCase_ : Optional[Any] = [el.to(snake_case_ ) for el in inputs['class_labels']]
with torch.no_grad():
UpperCAmelCase_ : Any = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
| 350 | '''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self , snake_case_ = "cpu" , snake_case_ = "openai/clip-vit-large-patch14" ):
'''simple docstring'''
UpperCAmelCase_ : Any = device
UpperCAmelCase_ : Tuple = CLIPTokenizerFast.from_pretrained(snake_case_ )
UpperCAmelCase_ : Optional[Any] = [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73]
UpperCAmelCase_ : Union[str, Any] = [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11]
UpperCAmelCase_ : Tuple = torchvision.transforms.Normalize(self.image_mean , self.image_std )
UpperCAmelCase_ : Optional[Any] = torchvision.transforms.Resize(2_2_4 )
UpperCAmelCase_ : Any = torchvision.transforms.CenterCrop(2_2_4 )
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = self.resize(snake_case_ )
UpperCAmelCase_ : Tuple = self.center_crop(snake_case_ )
UpperCAmelCase_ : Optional[Any] = self.normalize(snake_case_ )
return images
def __call__( self , snake_case_=None , snake_case_=None , **snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : str = self.tokenizer(text=snake_case_ , **snake_case_ )
UpperCAmelCase_ : Optional[Any] = self.preprocess_img(snake_case_ )
UpperCAmelCase_ : Optional[int] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case_=1_0 , snake_case_=0.01 , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=False , snake_case_=True , snake_case_="image" , snake_case_=True , snake_case_=False , snake_case_=False , snake_case_=False , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase_ : List[str] = None
UpperCAmelCase_ : Dict = device if device else get_device()
if vqgan:
UpperCAmelCase_ : Any = vqgan
else:
UpperCAmelCase_ : Dict = load_vqgan(self.device , conf_path=snake_case_ , ckpt_path=snake_case_ )
self.vqgan.eval()
if clip:
UpperCAmelCase_ : List[str] = clip
else:
UpperCAmelCase_ : List[Any] = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
UpperCAmelCase_ : Tuple = ProcessorGradientFlow(device=self.device )
UpperCAmelCase_ : Dict = iterations
UpperCAmelCase_ : Dict = lr
UpperCAmelCase_ : str = log
UpperCAmelCase_ : Tuple = make_grid
UpperCAmelCase_ : Union[str, Any] = return_val
UpperCAmelCase_ : List[Any] = quantize
UpperCAmelCase_ : int = self.vqgan.decoder.z_shape
def _UpperCamelCase ( self , snake_case_=None , snake_case_=None , snake_case_=5 , snake_case_=True ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = []
if output_path is None:
UpperCAmelCase_ : List[str] = './animation.gif'
if input_path is None:
UpperCAmelCase_ : List[str] = self.save_path
UpperCAmelCase_ : List[str] = sorted(glob(input_path + '/*' ) )
if not len(snake_case_ ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(snake_case_ ) == 1:
print('Only one image found in save path (did you pass save_intermediate=True to the generate function?)' )
UpperCAmelCase_ : Tuple = total_duration / len(snake_case_ )
UpperCAmelCase_ : str = [frame_duration] * len(snake_case_ )
if extend_frames:
UpperCAmelCase_ : List[str] = 1.5
UpperCAmelCase_ : Any = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(snake_case_ ) )
imageio.mimsave(snake_case_ , snake_case_ , duration=snake_case_ )
print(F'''gif saved to {output_path}''' )
def _UpperCamelCase ( self , snake_case_=None , snake_case_=None ):
'''simple docstring'''
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
UpperCAmelCase_ : Optional[Any] = preprocess(Image.open(snake_case_ ) , target_image_size=2_5_6 ).to(self.device )
UpperCAmelCase_ : Dict = preprocess_vqgan(snake_case_ )
UpperCAmelCase_ , *UpperCAmelCase_ : Tuple = self.vqgan.encode(snake_case_ )
return z
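# Note: z is the pre-quantization VQGAN latent; its spatial size depends on the
# checkpoint's downsampling factor (e.g. roughly 16x16 for a 256x256 input with
# a typical f=16 VQGAN -- an illustrative assumption, not verified here).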
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self.latent.detach().requires_grad_()
UpperCAmelCase_ : List[Any] = base_latent + transform_vector
if self.quantize:
UpperCAmelCase_ , *UpperCAmelCase_ : Tuple = self.vqgan.quantize(snake_case_ )
else:
UpperCAmelCase_ : Optional[int] = trans_latent
return self.vqgan.decode(snake_case_ )
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_=None ):
'''simple docstring'''
UpperCAmelCase_ : int = self.clip_preprocessor(text=snake_case_ , images=snake_case_ , return_tensors='pt' , padding=snake_case_ )
UpperCAmelCase_ : Any = self.clip(**snake_case_ )
UpperCAmelCase_ : Dict = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase_ : Union[str, Any] = similarity_logits * weights
return similarity_logits.sum()
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = self._get_clip_similarity(pos_prompts['prompts'] , snake_case_ , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
UpperCAmelCase_ : List[Any] = self._get_clip_similarity(neg_prompts['prompts'] , snake_case_ , weights=neg_prompts['weights'] )
else:
UpperCAmelCase_ : Union[str, Any] = torch.tensor([1] , device=self.device )
UpperCAmelCase_ : Dict = -torch.log(snake_case_ ) + torch.log(snake_case_ )
return loss
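# The loss works out to log(neg_similarity / pos_similarity): minimizing it
# pushes the decoded image toward the positive prompts and away from the
# negative ones (a reading of the two _get_clip_similarity calls above).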
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = torch.randn_like(self.latent , requires_grad=snake_case_ , device=self.device )
UpperCAmelCase_ : int = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase_ : Dict = self._add_vector(snake_case_ )
UpperCAmelCase_ : List[Any] = loop_post_process(snake_case_ )
UpperCAmelCase_ : Union[str, Any] = self._get_CLIP_loss(snake_case_ , snake_case_ , snake_case_ )
print('CLIP loss' , snake_case_ )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=snake_case_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
'''simple docstring'''
wandb.init(reinit=snake_case_ , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
UpperCAmelCase_ : str = Image.open(snake_case_ )
UpperCAmelCase_ : str = image.resize((2_5_6, 2_5_6) )
wandb.log({'Original Image': wandb.Image(snake_case_ )} )
def _UpperCamelCase ( self , snake_case_ ):
'''simple docstring'''
if not prompts:
return []
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Optional[int] = []
if isinstance(snake_case_ , snake_case_ ):
UpperCAmelCase_ : Union[str, Any] = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(snake_case_ , (tuple, list) ):
UpperCAmelCase_ : Tuple = prompt[0]
UpperCAmelCase_ : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase_ , UpperCAmelCase_ : int = prompt.split(':' )
UpperCAmelCase_ : List[str] = float(snake_case_ )
else:
UpperCAmelCase_ : Optional[int] = prompt
UpperCAmelCase_ : List[str] = 1.0
processed_prompts.append(snake_case_ )
weights.append(snake_case_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(snake_case_ , device=self.device ),
}
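# Sketch of the parsing above: a string like "a red house:2|a sketch:0.5"
# (pipe-separated, with an optional ":weight" suffix) would yield prompts
# ["a red house", "a sketch"] with weights tensor([2.0, 0.5]).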
def _UpperCamelCase ( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=True , snake_case_=None , ):
'''simple docstring'''
if image_path:
UpperCAmelCase_ : List[Any] = self._get_latent(snake_case_ )
else:
UpperCAmelCase_ : Any = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(snake_case_ , snake_case_ , snake_case_ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase_ : Optional[int] = self.process_prompts(snake_case_ )
UpperCAmelCase_ : int = self.process_prompts(snake_case_ )
if save_final and save_path is None:
UpperCAmelCase_ : Union[str, Any] = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(snake_case_ ):
os.makedirs(snake_case_ )
else:
UpperCAmelCase_ : Any = save_path + '_' + get_timestamp()
os.makedirs(snake_case_ )
UpperCAmelCase_ : List[Any] = save_path
UpperCAmelCase_ : Dict = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(snake_case_ ) )
UpperCAmelCase_ : Optional[int] = loop_post_process(snake_case_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(snake_case_ , snake_case_ , snake_case_ ) ):
if show_intermediate:
show_pil(snake_case_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({'Image': wandb.Image(snake_case_ )} )
if show_final:
show_pil(snake_case_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F'''iter_{iter:03d}_final.png''' ) )
| 274 | 0 |
"""simple docstring"""
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__lowercase = HfArgumentParser(InitializationArguments)
__lowercase = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__lowercase = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__lowercase = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
__lowercase = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__lowercase = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
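# Hypothetical invocation (flag names assumed from InitializationArguments):
# python initialize_model.py --config_name gpt2-large \
#     --tokenizer_name codeparrot/codeparrot --model_name codeparrot --push_to_hub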
| 40 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [0] * no_of_processes
__SCREAMING_SNAKE_CASE = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = burst_time[i]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 9_9999_9999
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowerCAmelCase_ ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__SCREAMING_SNAKE_CASE = remaining_time[j]
__SCREAMING_SNAKE_CASE = j
__SCREAMING_SNAKE_CASE = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__SCREAMING_SNAKE_CASE = remaining_time[short]
if minm == 0:
__SCREAMING_SNAKE_CASE = 9_9999_9999
if remaining_time[short] == 0:
complete += 1
__SCREAMING_SNAKE_CASE = False
# Find finish time of current process
__SCREAMING_SNAKE_CASE = increment_time + 1
# Calculate waiting time
__SCREAMING_SNAKE_CASE = finish_time - arrival_time[short]
__SCREAMING_SNAKE_CASE = finar - burst_time[short]
if waiting_time[short] < 0:
__SCREAMING_SNAKE_CASE = 0
# Increment time
increment_time += 1
return waiting_time
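# Worked example of the preemptive shortest-remaining-time-first loop above
# (illustrative values): with arrival_time=[0, 1] and burst_time=[4, 2], the
# second process preempts at t=1 and finishes at t=3, the first resumes and
# finishes at t=6, so calculate_waitingtime([0, 1], [4, 2], 2) == [2, 0].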
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [0] * no_of_processes
for i in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = burst_time[i] + waiting_time[i]
return turn_around_time
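# e.g. burst_time=[4, 2] with waiting_time=[2, 0] gives turn_around_time=[6, 2],
# since turnaround time = burst time + waiting time.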
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for i in range(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = total_waiting_time + waiting_time[i]
__SCREAMING_SNAKE_CASE = total_turn_around_time + turn_around_time[i]
print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print("Average turn around time =" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('''Enter how many processes you want to analyze''')
a__ : Optional[Any] = int(input())
a__ : Optional[int] = [0] * no_of_processes
a__ : int = [0] * no_of_processes
a__ : List[Any] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('''Enter the arrival time and burst time for process:--''' + str(i + 1))
a__ , a__ : Tuple = map(int, input().split())
a__ : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a__ : Dict = burst_time
a__ : Any = no_of_processes
a__ : Optional[int] = waiting_time
a__ : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a__ : str = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'''Process''',
'''BurstTime''',
'''ArrivalTime''',
'''WaitingTime''',
'''TurnAroundTime''',
],
)
# Printing the DataFrame
pd.set_option('''display.max_rows''', fcfs.shape[0] + 1)
print(fcfs)
| 54 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 154 | """simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
if inductance <= 0:
raise ValueError('Inductance cannot be 0 or negative' )
elif capacitance <= 0:
raise ValueError('Capacitance cannot be 0 or negative' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
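# Quick check with illustrative values, using f = 1 / (2 * pi * sqrt(L * C)):
# L = 55e-3 H and C = 4e-4 F give f ~= 33.93 Hz.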
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : Union[List[PIL.Image.Image], np.ndarray]
lowercase : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.26.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(""">=""", """0.0.12""")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __UpperCamelCase ( lowerCamelCase__ ):
lowercase : np.ndarray
lowercase : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 75 |
'''simple docstring'''
a_ : Any = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
a_ : Any = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
a_ : Optional[Any] = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
a_ : str = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
a_ : Optional[int] = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
a_ : Dict = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
a_ : Tuple = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
a_ : Any = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
| 75 | 1 |
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
__lowerCAmelCase = OmegaConf.load(lowerCamelCase)
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase)))
return config
def __magic_name__( lowerCamelCase, lowerCamelCase=None, lowerCamelCase=None):
if conf_path is None:
__lowerCAmelCase = '''./model_checkpoints/vqgan_only.yaml'''
__lowerCAmelCase = load_config(lowerCamelCase, display=lowerCamelCase)
__lowerCAmelCase = VQModel(**config.model.params)
if ckpt_path is None:
__lowerCAmelCase = '''./model_checkpoints/vqgan_only.pt'''
__lowerCAmelCase = torch.load(lowerCamelCase, map_location=lowerCamelCase)
if ".ckpt" in ckpt_path:
__lowerCAmelCase = sd['''state_dict''']
model.load_state_dict(lowerCamelCase, strict=lowerCamelCase)
model.to(lowerCamelCase)
del sd
return model
def __magic_name__( lowerCamelCase, lowerCamelCase):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = model.encode(lowerCamelCase)
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""")
__lowerCAmelCase = model.decode(lowerCamelCase)
return xrec
def __magic_name__( lowerCamelCase, lowerCamelCase=False):
__lowerCAmelCase , __lowerCAmelCase = string.rsplit('''.''', 1)
if reload:
__lowerCAmelCase = importlib.import_module(lowerCamelCase)
importlib.reload(lowerCamelCase)
return getattr(importlib.import_module(lowerCamelCase, package=lowerCamelCase), cls)
def __magic_name__( lowerCamelCase):
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''')
return get_obj_from_str(config['''target'''])(**config.get('''params''', {}))
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase=True, lowerCamelCase=True):
__lowerCAmelCase = instantiate_from_config(lowerCamelCase)
if sd is not None:
model.load_state_dict(lowerCamelCase)
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
# load the specified checkpoint
if ckpt:
__lowerCAmelCase = torch.load(lowerCamelCase, map_location='''cpu''')
__lowerCAmelCase = pl_sd['''global_step''']
print(F"""loaded model from global step {global_step}.""")
else:
__lowerCAmelCase = {'''state_dict''': None}
__lowerCAmelCase = None
__lowerCAmelCase = load_model_from_config(config.model, pl_sd['''state_dict'''], gpu=lowerCamelCase, eval_mode=lowerCamelCase)['''model''']
return model, global_step | 365 |
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_UpperCAmelCase : str = logging.get_logger(__name__)
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase):
return [
int(1_0_0_0 * (box[0] / width)),
int(1_0_0_0 * (box[1] / height)),
int(1_0_0_0 * (box[2] / width)),
int(1_0_0_0 * (box[3] / height)),
]
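# e.g. a word box (50, 60, 120, 80) on a 1000x2000 (width x height) page
# normalizes to [50, 30, 120, 40] on the fixed 0-1000 layout grid.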
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = None):
__lowerCAmelCase = tesseract_config if tesseract_config is not None else ''''''
# apply OCR
__lowerCAmelCase = to_pil_image(lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase = pil_image.size
__lowerCAmelCase = pytesseract.image_to_data(lowerCamelCase, lang=lowerCamelCase, output_type='''dict''', config=lowerCamelCase)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']
# filter empty words and corresponding coordinates
__lowerCAmelCase = [idx for idx, word in enumerate(lowerCamelCase) if not word.strip()]
__lowerCAmelCase = [word for idx, word in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
__lowerCAmelCase = [coord for idx, coord in enumerate(lowerCamelCase) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__lowerCAmelCase = []
for x, y, w, h in zip(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase):
__lowerCAmelCase = [x, y, x + w, y + h]
actual_boxes.append(lowerCamelCase)
# finally, normalize the bounding boxes
__lowerCAmelCase = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCamelCase, lowerCamelCase, lowerCamelCase))
assert len(lowerCamelCase) == len(lowerCamelCase), "Not as many words as there are bounding boxes"
return words, normalized_boxes
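# apply_tesseract thus returns two parallel lists: the OCR'd words and their
# boxes on the 0-1000 grid, which LayoutLM-style models consume as token-level
# layout features.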
class a__ ( __A ):
"""simple docstring"""
__UpperCamelCase : str = ['pixel_values']
def __init__(self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BILINEAR , __lowercase = True , __lowercase = None , __lowercase = "" , **__lowercase , ):
super().__init__(**__lowercase )
__lowerCAmelCase = size if size is not None else {'''height''': 2_24, '''width''': 2_24}
__lowerCAmelCase = get_size_dict(__lowercase )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = apply_ocr
__lowerCAmelCase = ocr_lang
__lowerCAmelCase = tesseract_config
def _snake_case (self , __lowercase , __lowercase , __lowercase = PILImageResampling.BILINEAR , __lowercase = None , **__lowercase , ):
__lowerCAmelCase = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__lowerCAmelCase = (size['''height'''], size['''width'''])
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ):
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(__lowercase )
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = apply_ocr if apply_ocr is not None else self.apply_ocr
__lowerCAmelCase = ocr_lang if ocr_lang is not None else self.ocr_lang
__lowerCAmelCase = tesseract_config if tesseract_config is not None else self.tesseract_config
__lowerCAmelCase = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(__lowercase ) for image in images]
if apply_ocr:
requires_backends(self , '''pytesseract''' )
__lowerCAmelCase = []
__lowerCAmelCase = []
for image in images:
__lowerCAmelCase , __lowerCAmelCase = apply_tesseract(__lowercase , __lowercase , __lowercase )
words_batch.append(__lowercase )
boxes_batch.append(__lowercase )
if do_resize:
__lowerCAmelCase = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
__lowerCAmelCase = [flip_channel_order(__lowercase ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__lowerCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=__lowercase )
if apply_ocr:
__lowerCAmelCase = words_batch
__lowerCAmelCase = boxes_batch
return data
| 9 | 0 |
from math import ceil, sqrt
def lowercase__ ( __snake_case : int = 1_000_000 ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
UpperCAmelCase_ : Union[str, Any] = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
UpperCAmelCase_ : List[str] = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
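# Small sanity check: solution(8) == 1, since the only hollow square lamina
# using at most eight tiles is a 3x3 outline around a 1x1 hole (9 - 1 = 8).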
if __name__ == "__main__":
print(F'{solution() = }')
| 29 |
__UpperCAmelCase = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
| 29 | 1 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__UpperCAmelCase :Optional[Any] = Lock()
def _a ( _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Tuple , _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Dict , _lowercase : Tuple ):
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
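# note: the swap count is hardcoded to 10 to match the 10-element list built
# in main() below; a general version would iterate len(arr) times instead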
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(_lowercase )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
__UpperCAmelCase : Union[str, Any] = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
__UpperCAmelCase : Union[str, Any] = min(_lowercase , _lowercase )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(_lowercase )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
__UpperCAmelCase : Dict = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
__UpperCAmelCase : int = max(_lowercase , _lowercase )
# after all swaps are performed, send the values back to main
result_pipe[1].send(_lowercase )
def _a ( _lowercase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : str = []
__UpperCAmelCase : Dict = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
__UpperCAmelCase : int = Pipe()
__UpperCAmelCase : List[Any] = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
__UpperCAmelCase : Union[str, Any] = temp_rs
__UpperCAmelCase : int = temp_rr
for i in range(1 , len(_lowercase ) - 1 ):
__UpperCAmelCase : Dict = Pipe()
__UpperCAmelCase : str = Pipe()
process_array_.append(
Process(
target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
__UpperCAmelCase : int = temp_rs
__UpperCAmelCase : Union[str, Any] = temp_rr
process_array_.append(
Process(
target=_lowercase , args=(
len(_lowercase ) - 1,
arr[len(_lowercase ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(_lowercase ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(_lowercase ) ):
__UpperCAmelCase : Optional[Any] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = list(range(10 , 0 , -1 ) )
print('''Initial List''' )
print(*_lowercase )
__UpperCAmelCase : List[Any] = odd_even_transposition(_lowercase )
print('''Sorted List\n''' )
print(*_lowercase )
if __name__ == "__main__":
main() | 363 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : Any , snake_case : Any , snake_case : Optional[int]=13 , snake_case : List[str]=7 , snake_case : List[str]=True , snake_case : List[Any]=True , snake_case : int=True , snake_case : Tuple=True , snake_case : int=99 , snake_case : Any=16 , snake_case : Dict=36 , snake_case : Any=6 , snake_case : Dict=6 , snake_case : Dict=6 , snake_case : int=37 , snake_case : int="gelu" , snake_case : str=0.1 , snake_case : Any=0.1 , snake_case : Dict=512 , snake_case : List[Any]=16 , snake_case : Any=2 , snake_case : Any=0.02 , snake_case : Optional[int]=3 , snake_case : List[Any]=4 , snake_case : List[str]=None , ) -> Union[str, Any]:
__UpperCAmelCase : str = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : int = seq_length
__UpperCAmelCase : Optional[Any] = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : List[Any] = use_token_type_ids
__UpperCAmelCase : Dict = use_labels
__UpperCAmelCase : int = vocab_size
__UpperCAmelCase : Optional[int] = embedding_size
__UpperCAmelCase : str = hidden_size
__UpperCAmelCase : Union[str, Any] = num_hidden_layers
__UpperCAmelCase : List[Any] = num_hidden_groups
__UpperCAmelCase : Any = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : Optional[Any] = hidden_act
__UpperCAmelCase : Tuple = hidden_dropout_prob
__UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob
__UpperCAmelCase : Optional[int] = max_position_embeddings
__UpperCAmelCase : List[str] = type_vocab_size
__UpperCAmelCase : Any = type_sequence_label_size
__UpperCAmelCase : List[Any] = initializer_range
__UpperCAmelCase : Dict = num_labels
__UpperCAmelCase : str = num_choices
__UpperCAmelCase : Union[str, Any] = scope
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : Dict = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : int = None
__UpperCAmelCase : List[str] = None
__UpperCAmelCase : Optional[Any] = None
if self.use_labels:
__UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase__ ( self : List[str] ) -> Optional[Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def lowerCamelCase__ ( self : Tuple , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : int ) -> Optional[int]:
__UpperCAmelCase : List[Any] = AlbertModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
__UpperCAmelCase : List[str] = model(snake_case , token_type_ids=snake_case )
__UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCamelCase__ ( self : List[str] , snake_case : str , snake_case : Optional[int] , snake_case : List[Any] , snake_case : Any , snake_case : Dict , snake_case : Dict , snake_case : Optional[int] ) -> Optional[int]:
__UpperCAmelCase : str = AlbertForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Union[str, Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , sentence_order_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase__ ( self : Dict , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Any , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Dict = AlbertForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase__ ( self : str , snake_case : Tuple , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple ) -> int:
__UpperCAmelCase : Optional[Any] = AlbertForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Optional[Any] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase__ ( self : Tuple , snake_case : List[str] , snake_case : Dict , snake_case : Optional[int] , snake_case : Dict , snake_case : int , snake_case : Optional[int] , snake_case : Optional[Any] ) -> Any:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Any = AlbertForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[int] , snake_case : str , snake_case : Dict , snake_case : Union[str, Any] , snake_case : List[str] ) -> int:
__UpperCAmelCase : Optional[int] = self.num_labels
__UpperCAmelCase : Optional[int] = AlbertForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase__ ( self : Tuple , snake_case : Tuple , snake_case : List[Any] , snake_case : Dict , snake_case : int , snake_case : List[Any] , snake_case : List[Any] , snake_case : Optional[Any] ) -> Tuple:
__UpperCAmelCase : Optional[int] = self.num_choices
__UpperCAmelCase : List[Any] = AlbertForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Any:
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
(
    __UpperCAmelCase,
    __UpperCAmelCase,
    __UpperCAmelCase,
    __UpperCAmelCase,
    __UpperCAmelCase,
    __UpperCAmelCase,
    __UpperCAmelCase,
) = config_and_inputs
__UpperCAmelCase : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE : Any = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : Any = True
def lowerCamelCase__ ( self : Optional[int] , snake_case : Any , snake_case : Dict , snake_case : Tuple=False ) -> Optional[Any]:
__UpperCAmelCase : Any = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
__UpperCAmelCase : Union[str, Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Dict ) -> int:
__UpperCAmelCase : List[Any] = AlbertModelTester(self )
__UpperCAmelCase : Any = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Any ) -> Any:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Any ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]:
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Tuple:
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase__ ( self : Dict ) -> str:
__UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase__ ( self : List[str] ) -> Tuple:
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase__ ( self : str ) -> Any:
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__UpperCAmelCase : Tuple = type
self.model_tester.create_and_check_model(*snake_case )
@slow
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : List[Any] = AlbertModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@require_torch
class a ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = AlbertModel.from_pretrained('''albert-base-v2''' )
__UpperCAmelCase : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__UpperCAmelCase : List[Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__UpperCAmelCase : Optional[int] = model(snake_case , attention_mask=snake_case )[0]
__UpperCAmelCase : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , snake_case )
__UpperCAmelCase : int = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) ) | 240 | 0 |
"""simple docstring"""
def lowercase__ ( snake_case_ :int = 3 , snake_case_ :int = 7 , snake_case_ :int = 1_000_000 ):
__UpperCAmelCase = 0
__UpperCAmelCase = 1
for current_denominator in range(1 , limit + 1 ):
__UpperCAmelCase = current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__UpperCAmelCase = current_numerator
__UpperCAmelCase = current_denominator
return max_numerator
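# e.g. solution(3, 7, 8) == 2, since 2/5 is the largest fraction below 3/7
# with denominator at most 8; the default limit of 1_000_000 matches the
# Project Euler 71 statement.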
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 332 |
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def lowercase__ ( snake_case_ :Optional[int] ):
return EnvironmentCommand()
def lowercase__ ( snake_case_ :List[str] ):
return EnvironmentCommand(args.accelerate_config_file )
class _UpperCAmelCase ( _lowerCAmelCase ):
@staticmethod
def a ( _lowercase : ArgumentParser ):
__UpperCAmelCase = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_lowercase )
def __init__( self : Optional[int] , _lowercase : str , *_lowercase : Tuple ):
__UpperCAmelCase = accelerate_config_file
def a ( self : Dict ):
__UpperCAmelCase = '''not installed'''
if is_safetensors_available():
import safetensors
__UpperCAmelCase = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
__UpperCAmelCase = F'''{safetensors.__version__} but is ignored because the installed PyTorch version is too old.'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = __UpperCAmelCase = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
__UpperCAmelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowercase ):
__UpperCAmelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
__UpperCAmelCase = (
'''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_lowercase , _lowercase )
else F'''\t{accelerate_config}'''
)
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_torch_available():
import torch
__UpperCAmelCase = torch.__version__
__UpperCAmelCase = torch.cuda.is_available()
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_tf_available():
import tensorflow as tf
__UpperCAmelCase = tf.__version__
try:
# deprecated in v2.1
__UpperCAmelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
__UpperCAmelCase = bool(tf.config.list_physical_devices('''GPU''' ) )
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''not installed'''
__UpperCAmelCase = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
__UpperCAmelCase = flax.__version__
__UpperCAmelCase = jax.__version__
__UpperCAmelCase = jaxlib.__version__
__UpperCAmelCase = jax.lib.xla_bridge.get_backend().platform
__UpperCAmelCase = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': F'''{safetensors_version}''',
'''Accelerate version''': F'''{accelerate_version}''',
'''Accelerate config''': F'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': F'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': F'''{flax_version} ({jax_backend})''',
'''Jax version''': F'''{jax_version}''',
'''JaxLib version''': F'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def a ( _lowercase : str ):
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 332 | 1 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
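

# The device-dependent generator setup in get_dummy_inputs is a recurring
# diffusers-test idiom; a minimal standalone version (the helper name is ours):
def make_generator(device, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        # torch.Generator(device="mps") is unsupported on older torch builds,
        # so fall back to seeding the global CPU generator.
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)


assert make_generator("cpu", seed=0).initial_seed() == 0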
| 358 |
"""simple docstring"""
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each term adds roughly 14 digits
    constant_term = 426_880 * Decimal(10_005).sqrt()
    exponential_term = 1
    linear_term = 13_591_409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545_140_134
        exponential_term *= -262_537_412_640_768_000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
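

# The Chudnovsky loop above leans on the decimal context: getcontext().prec
# caps the precision of every Decimal operation, which is why pi() sets it
# before any arithmetic. A quick self-contained demonstration:
def _demo_context_precision() -> None:
    getcontext().prec = 5
    assert str(Decimal(2).sqrt()) == "1.4142"
    getcontext().prec = 16
    assert str(Decimal(2).sqrt()) == "1.414213562373095"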
| 95 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
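

# The same lazy pattern backs the Autoformer __init__ below. A self-contained
# sketch of the idea (this LazyModule is a toy, not transformers._LazyModule,
# which also handles __dir__, pickling and module specs):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


lazy = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
assert lazy.sqrt(9.0) == 3.0  # math is only imported on this first access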
| 327 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_autoformer""": [
"""AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""AutoformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"""AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AutoformerForPrediction""",
"""AutoformerModel""",
"""AutoformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 327 | 1 |
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")


class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        return len(self.forward)


class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))

            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head
        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1
        return level

    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        update_vector = []
        node = self.head
        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)
        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()
            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level
            new_node = Node(key, value)
            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])
                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT) -> VT | None:
        node, _ = self._locate_node(key)
        if node is not None:
            return node.value
        return None
def test_insert():
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value():
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none():
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search():
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing():
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes():
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values():
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests():
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main():
    """Small visual demo of the skip list."""
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
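

# Quick usage demo for the classes above; levels are randomised, but iteration
# always yields keys in sorted order.
def demo():
    skip_list = SkipList()
    for key, value in [("b", 2), ("a", 1), ("c", 3)]:
        skip_list.insert(key, value)
    assert skip_list.find("b") == 2
    skip_list.delete("b")
    assert skip_list.find("b") is None
    assert list(skip_list) == ["a", "c"]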
| 369 |
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(f'A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the remote-filesystem prefix (e.g. ``s3://``) from `dataset_path`."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs) -> bool:
    """Check whether `fs` is a remote filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs, src: str, dst: str):
    """Rename the file `src` in `fs` to `dst`."""
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    """Clear references to fsspec's event loop and IO thread; otherwise
    HTTPFileSystem can hang inside a training loop."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
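

# Expected behaviour of the helpers above (is_remote_filesystem assumes an
# fsspec version where LocalFileSystem.protocol is the string "file"):
def _demo_filesystem_helpers():
    assert extract_path_from_uri("s3://my-bucket/data/train") == "my-bucket/data/train"
    assert extract_path_from_uri("relative/local/path") == "relative/local/path"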
| 75 | 0 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _snake_case ( UpperCamelCase : List[str] , UpperCamelCase : str=None ):
UpperCAmelCase : Optional[Any] = None
if token is not None:
UpperCAmelCase : str = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
UpperCAmelCase : List[Any] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
UpperCAmelCase : List[str] = requests.get(UpperCamelCase , headers=UpperCamelCase ).json()
UpperCAmelCase : List[Any] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
UpperCAmelCase : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase ):
UpperCAmelCase : List[str] = requests.get(url + F"&page={i + 2}" , headers=UpperCamelCase ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : List[Any]=None ):
UpperCAmelCase : Optional[Any] = None
if token is not None:
UpperCAmelCase : List[str] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
UpperCAmelCase : Optional[Any] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"
UpperCAmelCase : Tuple = requests.get(UpperCamelCase , headers=UpperCamelCase ).json()
UpperCAmelCase : List[str] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
UpperCAmelCase : Optional[int] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase ):
UpperCAmelCase : Tuple = requests.get(url + F"&page={i + 2}" , headers=UpperCamelCase ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"Unknown error, could not fetch links:\n{traceback.format_exc()}" )
return {}
def _snake_case ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Any ):
UpperCAmelCase : Any = None
if token is not None:
UpperCAmelCase : Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"Bearer {token}"}
UpperCAmelCase : List[Any] = requests.get(UpperCamelCase , headers=UpperCamelCase , allow_redirects=UpperCamelCase )
UpperCAmelCase : int = result.headers["""Location"""]
UpperCAmelCase : Union[str, Any] = requests.get(UpperCamelCase , allow_redirects=UpperCamelCase )
UpperCAmelCase : Optional[Any] = os.path.join(UpperCamelCase , F"{artifact_name}.zip" )
with open(UpperCamelCase , """wb""" ) as fp:
fp.write(response.content )
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : str=None ):
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : List[Any] = None
with zipfile.ZipFile(UpperCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(UpperCamelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(UpperCamelCase ) as f:
for line in f:
UpperCAmelCase : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
UpperCAmelCase : List[str] = line[: line.index(""": """ )]
UpperCAmelCase : Union[str, Any] = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
UpperCAmelCase : List[str] = line[len("""FAILED """ ) :]
failed_tests.append(UpperCamelCase )
elif filename == "job_name.txt":
UpperCAmelCase : Optional[int] = line
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise ValueError(
F"`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCamelCase )} for `errors` "
F"and {len(UpperCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
""" problem.""" )
UpperCAmelCase : Union[str, Any] = None
if job_name and job_links:
UpperCAmelCase : Optional[Any] = job_links.get(UpperCamelCase , UpperCamelCase )
# A list with elements of the form (line of error, error, failed test)
UpperCAmelCase : str = [x + [y] + [job_link] for x, y in zip(UpperCamelCase , UpperCamelCase )]
return result
def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : str=None ):
UpperCAmelCase : Tuple = []
UpperCAmelCase : Any = [os.path.join(UpperCamelCase , UpperCamelCase ) for p in os.listdir(UpperCamelCase ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(UpperCamelCase , job_links=UpperCamelCase ) )
return errors
def _snake_case ( UpperCamelCase : Tuple , UpperCamelCase : str=None ):
UpperCAmelCase : Optional[int] = Counter()
counter.update([x[1] for x in logs] )
UpperCAmelCase : str = counter.most_common()
UpperCAmelCase : Tuple = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
UpperCAmelCase : List[Any] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
UpperCAmelCase : List[str] = dict(sorted(r.items() , key=lambda UpperCamelCase : item[1]["count"] , reverse=UpperCamelCase ) )
return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : Tuple=None ):
UpperCAmelCase : Union[str, Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
UpperCAmelCase : Union[str, Any] = [x for x in logs if x[2] is not None]
UpperCAmelCase : List[str] = {x[2] for x in logs}
UpperCAmelCase : Tuple = {}
for test in tests:
UpperCAmelCase : Tuple = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
UpperCAmelCase : List[Any] = counter.most_common()
UpperCAmelCase : Union[str, Any] = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
UpperCAmelCase : List[str] = sum(error_counts.values() )
if n_errors > 0:
UpperCAmelCase : Optional[int] = {"""count""": n_errors, """errors""": error_counts}
UpperCAmelCase : Dict = dict(sorted(r.items() , key=lambda UpperCamelCase : item[1]["count"] , reverse=UpperCamelCase ) )
return r
def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
A: Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
A: Tuple = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
A: Union[str, Any] = get_job_links(args.workflow_run_id, token=args.token)
A: Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
A: str = k.find(" / ")
A: Tuple = k[index + len(" / ") :]
A: int = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
A: Optional[int] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
A: int = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
A: str = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
A: Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
A: str = reduce_by_error(errors)
A: Dict = reduce_by_model(errors)
A: Union[str, Any] = make_github_table(reduced_by_error)
A: Any = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
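

# The core aggregation in reduce_by_error is just a Counter over the error
# column; a tiny illustration with hand-made (error_line, error, test) triples:
def _demo_reduce():
    logs = [
        ("line1", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_a"),
        ("line2", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_b"),
        ("line3", "OSError", "tests/models/gpt2/test_modeling_gpt2.py::test_c"),
    ]
    counter = Counter(x[1] for x in logs)
    assert counter.most_common() == [("AssertionError", 2), ("OSError", 1)]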
| 109 |
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
_UpperCAmelCase = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq port)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
def __eq__( self , lowercase ):
"""simple docstring"""
return self.indices == other.indices
def __getitem__( self , lowercase ):
"""simple docstring"""
if idx < len(self.symbols ):
return self.symbols[idx]
return self.unk_word
def __len__( self ):
"""simple docstring"""
return len(self.symbols )
def __contains__( self , lowercase ):
"""simple docstring"""
return sym in self.indices
@classmethod
def lowerCAmelCase_ ( cls , lowercase ):
"""simple docstring"""
A_ : int = cls()
d.add_from_file(lowercase )
return d
def lowerCAmelCase_ ( self , lowercase , lowercase=1 , lowercase=False ):
"""simple docstring"""
if word in self.indices and not overwrite:
A_ : List[Any] = self.indices[word]
A_ : List[str] = self.count[idx] + n
return idx
else:
A_ : int = len(self.symbols )
A_ : Optional[Any] = idx
self.symbols.append(lowercase )
self.count.append(lowercase )
return idx
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
return 0
def lowerCAmelCase_ ( self , lowercase ):
"""simple docstring"""
if isinstance(lowercase , lowercase ):
try:
with open(lowercase , 'r' , encoding='utf-8' ) as fd:
self.add_from_file(lowercase )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception('Incorrect encoding detected in {}, please rebuild the dataset'.format(lowercase ) )
return
A_ : Any = f.readlines()
A_ : List[Any] = self._load_meta(lowercase )
for line in lines[indices_start_line:]:
try:
A_ , A_ : int = line.rstrip().rsplit(' ' , 1 )
if field == "#fairseq:overwrite":
A_ : Optional[int] = True
A_ , A_ : str = line.rsplit(' ' , 1 )
else:
A_ : Optional[int] = False
A_ : Optional[int] = int(lowercase )
A_ : Tuple = line
if word in self and not overwrite:
raise RuntimeError(
'Duplicate word found when loading Dictionary: \'{}\'. '
'Duplicate words can overwrite earlier ones by adding the '
'#fairseq:overwrite flag at the end of the corresponding row '
'in the dictionary file. If using the Camembert model, please '
'download an updated copy of the model file.'.format(lowercase ) )
self.add_symbol(lowercase , n=lowercase , overwrite=lowercase )
except ValueError:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt> [flags]\'' )
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def UpperCamelCase ( __lowercase : Any ,__lowercase : str ):
'''simple docstring'''
if not os.path.exists(__lowercase ):
raise ValueError(f'''path {biogpt_checkpoint_path} does not exist!''' )
os.makedirs(__lowercase ,exist_ok=__lowercase )
print(f'''Writing results to {pytorch_dump_folder_path}''' )
# handle various types of models
A_ : Optional[Any] = os.path.join(__lowercase ,'checkpoint.pt' )
if not os.path.isfile(__lowercase ):
raise ValueError(f'''path to the file {checkpoint_file} does not exist!''' )
A_ : Any = torch.load(__lowercase ,map_location='cpu' )
A_ : str = chkpt['cfg']['model']
# dicts
A_ : Any = os.path.join(__lowercase ,'dict.txt' )
if not os.path.isfile(__lowercase ):
raise ValueError(f'''path to the file {dict_file} does not exist!''' )
A_ : Optional[int] = Dictionary.load(__lowercase )
A_ : Union[str, Any] = rewrite_dict_keys(src_dict.indices )
A_ : List[Any] = len(__lowercase )
A_ : Tuple = os.path.join(__lowercase ,VOCAB_FILES_NAMES['vocab_file'] )
print(f'''Generating {src_vocab_file} of {src_vocab_size} records''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(__lowercase ,ensure_ascii=__lowercase ,indent=__lowercase ) )
# merges_file (bpecodes)
A_ : List[Any] = os.path.join(__lowercase ,'bpecodes' )
if not os.path.isfile(__lowercase ):
raise ValueError(f'''path to the file {bpecodes_file} does not exist!''' )
A_ : Optional[Any] = os.path.join(__lowercase ,VOCAB_FILES_NAMES['merges_file'] )
shutil.copyfile(__lowercase ,__lowercase )
# model config
A_ : Dict = os.path.join(__lowercase ,'config.json' )
A_ : List[Any] = {
'activation_dropout': args['activation_dropout'],
'architectures': ['BioGptForCausalLM'],
'attention_probs_dropout_prob': args['attention_dropout'],
'bos_token_id': 0,
'eos_token_id': 2,
'hidden_act': args['activation_fn'],
'hidden_dropout_prob': args['dropout'],
'hidden_size': args['decoder_embed_dim'],
'initializer_range': 0.02,
'intermediate_size': args['decoder_ffn_embed_dim'],
'layer_norm_eps': 1e-1_2,
'layerdrop': args['decoder_layerdrop'],
'max_position_embeddings': args['max_target_positions'],
'model_type': 'biogpt',
'num_attention_heads': args['decoder_attention_heads'],
'num_hidden_layers': args['decoder_layers'],
'pad_token_id': 1,
'scale_embedding': not args['no_scale_embedding'],
'tie_word_embeddings': args['share_decoder_input_output_embed'],
'vocab_size': src_vocab_size,
}
# good hparam defaults to start with
print(f'''Generating {biogpt_model_config_file}''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(__lowercase ,ensure_ascii=__lowercase ,indent=__lowercase ) )
# tokenizer config
A_ : List[Any] = os.path.join(__lowercase ,__lowercase )
A_ : Dict = {
'bos_token': '<s>',
'eos_token': '</s>',
'model_max_length': 10_24,
'pad_token': '<pad>',
'special_tokens_map_file': None,
'tokenizer_class': 'BioGptTokenizer',
'unk_token': '<unk>',
}
print(f'''Generating {biogpt_tokenizer_config_file}''' )
with open(__lowercase ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(__lowercase ,ensure_ascii=__lowercase ,indent=__lowercase ) )
# model
A_ : Any = chkpt['model']
# remove unneeded keys
A_ : List[Any] = [
'decoder.version',
]
for k in ignore_keys:
model_state_dict.pop(__lowercase ,__lowercase )
A_ : int = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith('output_projection.weight' ):
A_ : Union[str, Any] = model_state_dict.pop(__lowercase )
else:
A_ : str = model_state_dict.pop(__lowercase )
A_ : Optional[int] = BioGptConfig.from_pretrained(__lowercase )
A_ : List[Any] = BioGptForCausalLM(__lowercase )
# check that it loads ok
model_new.load_state_dict(__lowercase )
# save
A_ : List[str] = os.path.join(__lowercase ,__lowercase )
print(f'''Generating {pytorch_weights_dump_path}''' )
torch.save(__lowercase ,__lowercase )
print('Conversion is done!' )
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--biogpt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
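

# Example CLI wiring for the converter above (the script's on-disk name may
# differ; directory paths are placeholders and must contain checkpoint.pt,
# dict.txt and bpecodes, as validated in convert_biogpt_checkpoint_to_pytorch):
#
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path /path/to/fairseq_checkpoint_dir \
#       --pytorch_dump_folder_path /path/to/hf_output_dir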
| 140 | 0 |
'''simple docstring'''
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='session' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            'tokens': datasets.Sequence(datasets.Value('string' ) ),
            'labels': datasets.Sequence(datasets.ClassLabel(names=['negative', 'positive'] ) ),
            'answers': datasets.Sequence(
                {
                    'text': datasets.Value('string' ),
                    'answer_start': datasets.Value('int32' ),
                } ),
            'id': datasets.Value('int64' ),
        } )
    dataset = datasets.Dataset.from_dict(
        {
            'tokens': [['foo'] * 5] * n,
            'labels': [[1] * 5] * n,
            'answers': [{'answer_start': [97], 'text': ['1976']}] * 10,
            'id': list(range(n ) ),
        } ,features=features ,)
    return dataset
@pytest.fixture(scope='session' )
def arrow_file(tmp_path_factory ,dataset ):
    filename = str(tmp_path_factory.mktemp('data' ) / 'file.arrow' )
    dataset.map(cache_file_name=filename )
    return filename
# FILE_CONTENT + files
FILE_CONTENT = '''\
Text data.
Second line of data.'''
@pytest.fixture(scope='session' )
def text_file(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.txt'
    data = FILE_CONTENT
    with open(filename ,'w' ) as f:
        f.write(data )
    return filename
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Dict ) -> Tuple:
import bza
__snake_case : Dict = tmp_path_factory.mktemp('data' ) / 'file.txt.bz2'
__snake_case : Any = bytes(_UpperCAmelCase ,'utf-8' )
with bza.open(_UpperCAmelCase ,'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : int ) -> str:
import gzip
__snake_case : Optional[int] = str(tmp_path_factory.mktemp('data' ) / 'file.txt.gz' )
__snake_case : Tuple = bytes(_UpperCAmelCase ,'utf-8' )
with gzip.open(_UpperCAmelCase ,'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[int] ) -> str:
if datasets.config.LZ4_AVAILABLE:
import lza.frame
__snake_case : int = tmp_path_factory.mktemp('data' ) / 'file.txt.lz4'
__snake_case : int = bytes(_UpperCAmelCase ,'utf-8' )
with lza.frame.open(_UpperCAmelCase ,'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : List[Any] ) -> Optional[Any]:
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
__snake_case : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'file.txt.7z'
with pyazr.SevenZipFile(_UpperCAmelCase ,'w' ) as archive:
archive.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Dict ) -> str:
import tarfile
__snake_case : int = tmp_path_factory.mktemp('data' ) / 'file.txt.tar'
with tarfile.TarFile(_UpperCAmelCase ,'w' ) as f:
f.add(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Any ) -> str:
import lzma
__snake_case : List[str] = tmp_path_factory.mktemp('data' ) / 'file.txt.xz'
__snake_case : Tuple = bytes(_UpperCAmelCase ,'utf-8' )
with lzma.open(_UpperCAmelCase ,'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ) -> Dict:
import zipfile
__snake_case : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[int] ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
__snake_case : int = tmp_path_factory.mktemp('data' ) / 'file.txt.zst'
__snake_case : int = bytes(_UpperCAmelCase ,'utf-8' )
with zstd.open(_UpperCAmelCase ,'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def xml_file(tmp_path_factory ):
    filename = tmp_path_factory.mktemp('data' ) / 'file.xml'
    data = textwrap.dedent(
        '''\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>''' )
    with open(filename ,'w' ) as f:
        f.write(data )
    return filename
DATA = [
{'''col_1''': '''0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''3''', '''col_2''': 3, '''col_3''': 3.0},
]
DATA2 = [
{'''col_1''': '''4''', '''col_2''': 4, '''col_3''': 4.0},
{'''col_1''': '''5''', '''col_2''': 5, '''col_3''': 5.0},
]
DATA_DICT_OF_LISTS = {
'''col_1''': ['''0''', '''1''', '''2''', '''3'''],
'''col_2''': [0, 1, 2, 3],
'''col_3''': [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{'''col_3''': 0.0, '''col_1''': '''0''', '''col_2''': 0},
{'''col_3''': 1.0, '''col_1''': '''1''', '''col_2''': 1},
]
DATA_STR = [
{'''col_1''': '''s0''', '''col_2''': 0, '''col_3''': 0.0},
{'''col_1''': '''s1''', '''col_2''': 1, '''col_3''': 1.0},
{'''col_1''': '''s2''', '''col_2''': 2, '''col_3''': 2.0},
{'''col_1''': '''s3''', '''col_2''': 3, '''col_3''': 3.0},
]
@pytest.fixture(scope='session' )
def a_ ( ) -> List[Any]:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[str] ) -> Any:
__snake_case : Any = datasets.Dataset.from_dict(_UpperCAmelCase )
__snake_case : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset.arrow' )
dataset.map(cache_file_name=_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[Any] ) -> Optional[Any]:
__snake_case : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.sqlite' )
with contextlib.closing(sqlitea.connect(_UpperCAmelCase ) ) as con:
__snake_case : str = con.cursor()
cur.execute('CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)' )
for item in DATA:
cur.execute('INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)' ,tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope='session' )
def csv_path(tmp_path_factory ):
    path = str(tmp_path_factory.mktemp('data' ) / 'dataset.csv' )
    with open(path ,'w' ,newline='' ) as f:
        writer = csv.DictWriter(f ,fieldnames=['col_1', 'col_2', 'col_3'] )
        writer.writeheader()
        for item in DATA:
            writer.writerow(item )
    return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Tuple ) -> Tuple:
__snake_case : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.csv' )
with open(_UpperCAmelCase ,'w' ,newline='' ) as f:
__snake_case : Optional[int] = csv.DictWriter(_UpperCAmelCase ,fieldnames=['col_1', 'col_2', 'col_3'] )
writer.writeheader()
for item in DATA:
writer.writerow(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> Optional[int]:
import bza
__snake_case : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.bz2'
with open(_UpperCAmelCase ,'rb' ) as f:
__snake_case : List[Any] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_UpperCAmelCase ,'wb' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Dict ) -> Any:
__snake_case : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case : Dict = tmp_path_factory.mktemp('data' ) / 'dataset.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.basename(csv_path.replace('.csv' ,'.CSV' ) ) )
f.write(_UpperCAmelCase ,arcname=os.path.basename(csva_path.replace('.csv' ,'.CSV' ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : str ,_UpperCAmelCase : Tuple ) -> List[str]:
__snake_case : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.csv.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.join('main_dir' ,os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase ,arcname=os.path.join('main_dir' ,os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : str ) -> Union[str, Any]:
__snake_case : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.parquet' )
__snake_case : Union[str, Any] = pa.schema(
{
'col_1': pa.string(),
'col_2': pa.intaa(),
'col_3': pa.floataa(),
} )
with open(_UpperCAmelCase ,'wb' ) as f:
__snake_case : Union[str, Any] = pq.ParquetWriter(_UpperCAmelCase ,schema=_UpperCAmelCase )
__snake_case : Dict = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_UpperCAmelCase ) )] for k in DATA[0]} ,schema=_UpperCAmelCase )
writer.write_table(_UpperCAmelCase )
writer.close()
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Union[str, Any] ) -> str:
__snake_case : int = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case : Optional[int] = {'data': DATA}
with open(_UpperCAmelCase ,'w' ) as f:
json.dump(_UpperCAmelCase ,_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Tuple ) -> Optional[Any]:
__snake_case : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.json' )
__snake_case : str = {'data': DATA_DICT_OF_LISTS}
with open(_UpperCAmelCase ,'w' ) as f:
json.dump(_UpperCAmelCase ,_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Tuple ) -> Dict:
__snake_case : Tuple = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl' )
with open(_UpperCAmelCase ,'w' ) as f:
for item in DATA:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Any ) -> Tuple:
__snake_case : Union[str, Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.jsonl' )
with open(_UpperCAmelCase ,'w' ) as f:
for item in DATA:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[int] ) -> Optional[int]:
__snake_case : int = str(tmp_path_factory.mktemp('data' ) / 'dataset_312.jsonl' )
with open(_UpperCAmelCase ,'w' ) as f:
for item in DATA_312:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Any ) -> List[str]:
__snake_case : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset-str.jsonl' )
with open(_UpperCAmelCase ,'w' ) as f:
for item in DATA_STR:
f.write(json.dumps(_UpperCAmelCase ) + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Tuple ) -> str:
import gzip
__snake_case : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt.gz' )
with open(_UpperCAmelCase ,'rb' ) as orig_file:
with gzip.open(_UpperCAmelCase ,'wb' ) as zipped_file:
zipped_file.writelines(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : List[Any] ) -> Optional[int]:
import gzip
__snake_case : str = str(tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.gz' )
with open(_UpperCAmelCase ,'rb' ) as orig_file:
with gzip.open(_UpperCAmelCase ,'wb' ) as zipped_file:
zipped_file.writelines(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : str ,_UpperCAmelCase : int ) -> Union[str, Any]:
__snake_case : Any = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[int] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : int ) -> Tuple:
__snake_case : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.join('nested' ,os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
__snake_case : Tuple = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.jsonl.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.join('main_dir' ,os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase ,arcname=os.path.join('main_dir' ,os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Any ) -> int:
__snake_case : Optional[Any] = tmp_path_factory.mktemp('data' ) / 'dataset.jsonl.tar'
with tarfile.TarFile(_UpperCAmelCase ,'w' ) as f:
f.add(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
f.add(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ) -> Dict:
__snake_case : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset_nested.jsonl.tar'
with tarfile.TarFile(_UpperCAmelCase ,'w' ) as f:
f.add(_UpperCAmelCase ,arcname=os.path.join('nested' ,os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Union[str, Any] ) -> int:
__snake_case : int = ['0', '1', '2', '3']
__snake_case : Dict = str(tmp_path_factory.mktemp('data' ) / 'dataset.txt' )
with open(_UpperCAmelCase ,'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[str] ) -> Dict:
__snake_case : Optional[int] = ['0', '1', '2', '3']
__snake_case : List[Any] = str(tmp_path_factory.mktemp('data' ) / 'dataset2.txt' )
with open(_UpperCAmelCase ,'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[Any] ) -> Optional[int]:
__snake_case : Dict = ['0', '1', '2', '3']
__snake_case : str = tmp_path_factory.mktemp('data' ) / 'dataset.abc'
with open(_UpperCAmelCase ,'w' ) as f:
for item in data:
f.write(item + '\n' )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : List[Any] ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[int] ) -> Tuple:
__snake_case : Optional[int] = tmp_path_factory.mktemp('data' ) / 'dataset.text.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Tuple ,_UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Any ) -> Optional[int]:
__snake_case : str = tmp_path_factory.mktemp('data' ) / 'dataset_with_dir.text.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.join('main_dir' ,os.path.basename(_UpperCAmelCase ) ) )
f.write(_UpperCAmelCase ,arcname=os.path.join('main_dir' ,os.path.basename(_UpperCAmelCase ) ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Dict ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : int ) -> List[str]:
__snake_case : str = tmp_path_factory.mktemp('data' ) / 'dataset.ext.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.basename('unsupported.ext' ) )
f.write(_UpperCAmelCase ,arcname=os.path.basename('unsupported_2.ext' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Optional[int] ) -> int:
__snake_case : List[Any] = '\n'.join(['First', 'Second\u2029with Unicode new line', 'Third'] )
__snake_case : List[str] = str(tmp_path_factory.mktemp('data' ) / 'dataset_with_unicode_new_lines.txt' )
with open(_UpperCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write(_UpperCAmelCase )
return path
@pytest.fixture(scope='session' )
def a_ ( ) -> List[str]:
return os.path.join('tests' ,'features' ,'data' ,'test_image_rgb.jpg' )
@pytest.fixture(scope='session' )
def a_ ( ) -> int:
return os.path.join('tests' ,'features' ,'data' ,'test_audio_44100.wav' )
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ) -> Dict:
__snake_case : List[str] = tmp_path_factory.mktemp('data' ) / 'dataset.img.zip'
with zipfile.ZipFile(_UpperCAmelCase ,'w' ) as f:
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ) )
f.write(_UpperCAmelCase ,arcname=os.path.basename(_UpperCAmelCase ).replace('.jpg' ,'2.jpg' ) )
return path
@pytest.fixture(scope='session' )
def a_ ( _UpperCAmelCase : int ) -> Optional[Any]:
__snake_case : Tuple = tmp_path_factory.mktemp('data_dir' )
(data_dir / "subdir").mkdir()
with open(data_dir / 'subdir' / 'train.txt' ,'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / 'subdir' / 'test.txt' ,'w' ) as f:
f.write('bar\n' * 10 )
# hidden file
with open(data_dir / 'subdir' / '.test.txt' ,'w' ) as f:
f.write('bar\n' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '.subdir' / 'train.txt' ,'w' ) as f:
f.write('foo\n' * 10 )
with open(data_dir / '.subdir' / 'test.txt' ,'w' ) as f:
f.write('bar\n' * 10 )
return data_dir
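

# How a test module would consume one of these fixtures once names are
# restored (pytest collects conftest.py automatically, so a fixture is
# requested simply by parameter name; this test belongs in a test_*.py file):
def test_csv_fixture_contents(csv_path):
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert len(rows) == 4
    assert rows[0] == {"col_1": "0", "col_2": "0", "col_3": "0.0"}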
| 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class snake_case__ ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
A__ = ShapEPipeline
A__ = ['''prompt''']
A__ = ['''prompt''']
A__ = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
A__ = False
@property
def A_ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return 32
@property
def A_ ( self : str ) -> Optional[int]:
'''simple docstring'''
return 32
@property
def A_ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def A_ ( self : Tuple ) -> Dict:
'''simple docstring'''
return 8
@property
def A_ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
__snake_case : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def A_ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(__a )
@property
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
__snake_case : Dict = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__snake_case : Optional[Any] = PriorTransformer(**__a )
return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp',
            num_train_timesteps=1024,
            prediction_type='sample',
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            'prior': prior,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'renderer': renderer,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'num_inference_steps': 1,
            'frame_size': 32,
            'output_type': 'np',
        }
        return inputs
    def test_shap_e(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_np_out.npy'
        )
        pipe = ShapEPipeline.from_pretrained('openai/shap-e')
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            'a shark',
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type='np',
        ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 0 | 1 |
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
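
# Illustrative usage of this packaged builder (not part of the original module;
# the file name "train.pkl" is a placeholder for a pickled pandas DataFrame):
#     from datasets import load_dataset
#     ds = load_dataset("pandas", data_files={"train": "train.pkl"})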
| 124 |
def mean_absolute_deviation(nums: list) -> float:
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")
    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
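
# Worked example (illustrative, not in the original file): for [1, 2, 3, 4, 5]
# the average is 3, so the mean absolute deviation is
# (2 + 1 + 0 + 1 + 2) / 5 == 1.2.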
if __name__ == "__main__":
import doctest
doctest.testmod()
| 124 | 1 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
| 365 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether `cp` is the code point of a CJK character."""
    if (
(cp >= 0x4e00 and cp <= 0x9fff)
or (cp >= 0x3400 and cp <= 0x4dbf) #
or (cp >= 0x20000 and cp <= 0x2a6df) #
or (cp >= 0x2a700 and cp <= 0x2b73f) #
or (cp >= 0x2b740 and cp <= 0x2b81f) #
or (cp >= 0x2b820 and cp <= 0x2ceaf) #
or (cp >= 0xf900 and cp <= 0xfaff)
or (cp >= 0x2f800 and cp <= 0x2fa1f) #
): #
return True
return False
def is_chinese(word):
    """Returns 1 if every character in `word` is a CJK character, else 0."""
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
return 0
return 1
def get_chinese_word(tokens):
    """Collects the multi-character Chinese words from a list of tokens."""
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefixes '##' to BERT sub-tokens that continue a whole Chinese word."""
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
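
# Illustrative example (not in the original script): with bert_tokens
# ['中', '国', '人'] and chinese_word_set {'中国'}, the result is
# ['中', '##国', '人'] -- sub-tokens that continue a whole word get the '##' prefix.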
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
| 313 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 154 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f'Running {" ".join(cmd)}')
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
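
# Example invocation (illustrative; the TPU name, zone and command are
# placeholders, not values from this file):
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "python train.py" --install_accelerate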
| 154 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            variance_type = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log
        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key=None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None
        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))
        pred_prev_sample = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
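
# Minimal usage sketch (illustrative, not part of the original file): `unet`,
# `params` and `sample` are assumed to come from the surrounding pipeline.
#     scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
#     state = scheduler.create_state()
#     state = scheduler.set_timesteps(state, num_inference_steps=50, shape=sample.shape)
#     for t in state.timesteps:
#         model_output = unet.apply({"params": params}, sample, t).sample
#         sample, state = scheduler.step(state, model_output, t, sample, return_dict=False)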
| 365 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 125 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map=None,
    reduce_labels=False,
):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id
    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)
    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255
    mask = label != ignore_index
    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]
    intersect = pred_label[pred_label == label]
    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_union = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map=None,
    reduce_labels=False,
):
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num=None,
    label_map=None,
    reduce_labels=False,
):
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )
    # compute metrics
    metrics = {}
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label
    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc
    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels,
        ignore_index,
        nan_to_num=None,
        label_map=None,
        reduce_labels=False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
 | 341 |
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
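
# Illustrative behaviour (not in the original file): with [0, 5, 7, 10, 15],
# bisect_left returns 0 for item 0 and 2 for item 6 -- the first index at which
# the item can be inserted while keeping the collection sorted.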
def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f"""{target} was not found in {collection}.""")
    else:
        print(f"""{target} was found at position {result} in {collection}.""")
| 9 | 0 |
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
__A = parse_flag_from_env("RUN_SLOW", default=False)
__A = parse_flag_from_env("RUN_REMOTE", default=False)
__A = parse_flag_from_env("RUN_LOCAL", default=True)
__A = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
reason="test requires apache-beam and a compatible dill version",
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("0.3.2"),
reason="test requires dill>0.3.2 for cloudpickle compatibility",
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == "win32",
reason="test should not be run on Windows",
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
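

# Minimal usage sketch (illustrative, not from the original file): simulating a
# hard network failure inside a test; any HTTP call issued through `requests`
# inside the block should then raise a ConnectionError.
#
#     def test_offline_connection_fails():
#         with offline(OfflineSimulationMode.CONNECTION_FAILS):
#             with pytest.raises(requests.exceptions.ConnectionError):
#                 requests.Session().get("https://huggingface.co")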
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
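

# Illustrative usage sketch (not in the original): asserting that materializing
# a pyarrow table allocates Arrow memory. Uses `pa` (pyarrow) as imported above.
#
#     with assert_arrow_memory_increases():
#         table = pa.table({"col": list(range(10_000))})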
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502_http_error(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0], *cmd[1:], stdin=stdin, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE, env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    # returns the numerical id of a `pytest-xdist` worker, e.g. "gw2" -> 2; 0 when xdist isn't used
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
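

# Illustrative usage sketch (not in the original; the script name is a
# placeholder): launching a distributed worker on a per-xdist-worker port so
# parallel test workers don't collide.
#
#     port = get_torch_dist_unique_port()
#     execute_subprocess_async(
#         ["torchrun", f"--master_port={port}", "some_distributed_test_script.py"],
#         env=os.environ.copy(),
#     )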
| 366 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name

    return name
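

# Worked example (added for clarity, not in the original): tracing one
# checkpoint key through the renaming rules above.
#
#     rename_key("layers.0.blocks.1.modulation.f.weight")
#     # -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"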
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)

    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])

    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
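# Example invocation (illustrative; the script filename and output path are
# placeholders, not from the original):
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny-converted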
| 348 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
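

# Illustrative usage sketch (not part of the original module): timing how long
# it takes to write and reload a small dummy dataset.
#
#     features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#
#     @get_duration
#     def write(path):
#         return generate_example_dataset(path, features, num_examples=100)
#
#     seconds = write("/tmp/bench.arrow")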
| 53 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    r"""Constructs a BLIP processor which wraps a BERT tokenizer and a BLIP image processor into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
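

# Minimal usage sketch (illustrative; the checkpoint name is only an example):
#
#     processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
#     # -> BatchEncoding with "pixel_values", "input_ids" and "attention_mask"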
| 251 | 0 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform) -> np.ndarray:
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
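

# Illustrative usage sketch (not in the original file): extracting padded
# log-mel patches from two clips of different lengths.
#
#     feature_extractor = TvltFeatureExtractor()
#     clips = [np.random.randn(44100), np.random.randn(22050)]
#     out = feature_extractor(clips, sampling_rate=44100, return_tensors="np")
#     out["audio_values"].shape  # -> (2, 1, max_time_len, 128)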
| 351 |
__UpperCAmelCase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
__UpperCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
__UpperCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 257 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(eval_dataloader) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
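# Example launches (illustrative; the filename is a placeholder for this script):
#
#     python multi_process_metrics.py             # single process
#     accelerate launch multi_process_metrics.py  # distributed, after `accelerate config`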
| 169 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 95 | 0 |
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
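

# Illustrative example (added for clarity, not part of the original module):
# shortest path across a fully passable 3x3 grid (1 = free cell), no diagonals.
#
#     grid = np.ones((3, 3), dtype=int)
#     dist, path = dijkstra(grid, (0, 0), (2, 2), allow_diagonal=False)
#     # dist == 4.0; path is one of the equally short 4-step paths,
#     # e.g. [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2)]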
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 364 |
from ..utils import DummyObject, requires_backends
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 329 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    # This defines a "chinese character" as anything in the CJK Unicode block:
    #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True

    return False


def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
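

# Worked example (added for clarity, not in the original): sub-tokens that sit
# inside an LTP whole word get the "##" prefix.
#
#     add_sub_symbol(["中", "国", "人"], {"中国"})
#     # -> ["中", "##国", "人"]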
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''', type=str, default='''./resources/ltp''', help='''resources for LTP tokenizer, usually a path'''
)
parser.add_argument('''--bert''', type=str, default='''./resources/robert''', help='''resources for Bert tokenizer''')
parser.add_argument('''--save_path''', type=str, default='''./resources/ref.txt''', help='''path to save res''')
__UpperCamelCase : Optional[Any] = parser.parse_args()
main(args)
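
For illustration, here is what add_sub_symbol produces on a toy input (a minimal sketch; the token lists are hand-made, not real LTP/BERT output):

# Toy whole-word-masking example: LTP found the whole words {"喜欢", "天气"},
# so every BERT sub-token that continues one of them gets a "##" prefix.
example_tokens = ["我", "喜", "欢", "天", "气"]
print(add_sub_symbol(example_tokens, {"喜欢", "天气"}))
# -> ['我', '喜', '##欢', '天', '##气']; prepare_ref would then record positions 2 and 4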
| 106 |
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="max_length", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=4, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs, padding="longest", max_length=16, truncation=True, return_tensors="np", return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
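
The normalization these tests assert can be sketched independently of the feature extractor. A minimal NumPy version (my own illustration, not the transformers implementation):

import numpy as np

def utterance_cmvn(features, n_valid):
    """Zero-mean/unit-variance normalize the first n_valid frames of a (frames, bins) array."""
    valid = features[:n_valid]
    out = features.copy()
    out[:n_valid] = (valid - valid.mean(axis=0)) / np.sqrt(valid.var(axis=0) + 1e-7)
    out[n_valid:] = 0.0  # padded frames stay zero, matching the `< 1e-6` sum checks above
    return out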
| 75 | 0 |
"""simple docstring"""
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__lowerCAmelCase : Any = {"input_ids": [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__lowerCAmelCase,  # the expected-encoding dict defined above
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
| 369 |
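
The special-token layout asserted in test_sequence_builders can be sketched as a standalone helper (an illustration of the [CLS]/[SEP] composition, not the transformers implementation):

def build_with_special_tokens(cls_id, sep_id, ids_a, ids_b=None):
    """[CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair."""
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]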
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_timesformer'] = [
        'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TimesformerModel',
        'TimesformerForVideoClassification',
        'TimesformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 58 | 0 |
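
The _import_structure/_LazyModule pattern above defers heavy imports until a name is first accessed. A reduced sketch of the idea (a hypothetical helper, not the transformers implementation):

import importlib
import types

class LazyModule(types.ModuleType):
    """Resolve public names to their submodules on first attribute access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._class_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)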
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope='''session''' )
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset
@pytest.fixture(scope='''session''' )
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files
FILE_CONTENT = """\
    Text data.
    Second line of data."""
@pytest.fixture(scope='''session''' )
def _a ( a :int ) -> Tuple:
a = tmp_path_factory.mktemp('''data''' ) / '''file.txt'''
a = FILE_CONTENT
with open(a , '''w''' ) as f:
f.write(a )
return filename
@pytest.fixture(scope='''session''' )
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def _a ( a :Tuple ) -> str:
import gzip
a = str(tmp_path_factory.mktemp('''data''' ) / '''file.txt.gz''' )
a = bytes(a , '''utf-8''' )
with gzip.open(a , '''wb''' ) as f:
f.write(a )
return path
@pytest.fixture(scope='''session''' )
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path
@pytest.fixture(scope='''session''' )
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[Any] , a :Dict ) -> Tuple:
import tarfile
a = tmp_path_factory.mktemp('''data''' ) / '''file.txt.tar'''
with tarfile.TarFile(a , '''w''' ) as f:
f.add(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] ) -> str:
import lzma
a = tmp_path_factory.mktemp('''data''' ) / '''file.txt.xz'''
a = bytes(a , '''utf-8''' )
with lzma.open(a , '''wb''' ) as f:
f.write(a )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[Any] , a :Union[str, Any] ) -> Tuple:
import zipfile
a = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[Any] ) -> Any:
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
a = tmp_path_factory.mktemp('''data''' ) / '''file.txt.zst'''
a = bytes(a , '''utf-8''' )
with zstd.open(a , '''wb''' ) as f:
f.write(a )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :str ) -> Union[str, Any]:
a = tmp_path_factory.mktemp('''data''' ) / '''file.xml'''
a = textwrap.dedent(
'''\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>''' )
with open(a , '''w''' ) as f:
f.write(a )
return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope='''session''' )
def _a ( ) -> Tuple:
return DATA_DICT_OF_LISTS
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] ) -> Optional[Any]:
a = datasets.Dataset.from_dict(a )
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.arrow''' )
dataset.map(cache_file_name=a )
return path
@pytest.fixture(scope='''session''' )
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] ) -> Union[str, Any]:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.csv''' )
with open(a , '''w''' , newline='''''' ) as f:
a = csv.DictWriter(a , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(a )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] ) -> str:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.csv''' )
with open(a , '''w''' , newline='''''' ) as f:
a = csv.DictWriter(a , fieldnames=['''col_1''', '''col_2''', '''col_3'''] )
writer.writeheader()
for item in DATA:
writer.writerow(a )
return path
@pytest.fixture(scope='''session''' )
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path
@pytest.fixture(scope='''session''' )
def _a ( a :Dict , a :Optional[Any] , a :Optional[int] ) -> int:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] , a :List[str] , a :List[str] ) -> Tuple:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.csv.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.basename(csv_path.replace('''.csv''' , '''.CSV''' ) ) )
f.write(a , arcname=os.path.basename(csva_path.replace('''.csv''' , '''.CSV''' ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Any , a :Union[str, Any] , a :str ) -> List[Any]:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.csv.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.join('''main_dir''' , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join('''main_dir''' , os.path.basename(a ) ) )
return path
@pytest.fixture(scope='''session''' )
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[Any] ) -> Any:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
a = {'''data''': DATA}
with open(a , '''w''' ) as f:
json.dump(a , a )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Optional[Any] ) -> List[Any]:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.json''' )
a = {'''data''': DATA_DICT_OF_LISTS}
with open(a , '''w''' ) as f:
json.dump(a , a )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[Any] ) -> int:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl''' )
with open(a , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(a ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Optional[int] ) -> Optional[Any]:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.jsonl''' )
with open(a , '''w''' ) as f:
for item in DATA:
f.write(json.dumps(a ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Any ) -> Dict:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_312.jsonl''' )
with open(a , '''w''' ) as f:
for item in DATA_312:
f.write(json.dumps(a ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Any ) -> List[str]:
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset-str.jsonl''' )
with open(a , '''w''' ) as f:
for item in DATA_STR:
f.write(json.dumps(a ) + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] , a :int ) -> str:
import gzip
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt.gz''' )
with open(a , '''rb''' ) as orig_file:
with gzip.open(a , '''wb''' ) as zipped_file:
zipped_file.writelines(a )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Any , a :Union[str, Any] ) -> List[Any]:
import gzip
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.gz''' )
with open(a , '''rb''' ) as orig_file:
with gzip.open(a , '''wb''' ) as zipped_file:
zipped_file.writelines(a )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :int , a :Optional[Any] , a :List[Any] ) -> Union[str, Any]:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Any , a :Dict , a :str , a :Optional[Any] ) -> str:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.join('''nested''' , os.path.basename(a ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[str] , a :Optional[int] , a :Tuple ) -> int:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.jsonl.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.join('''main_dir''' , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join('''main_dir''' , os.path.basename(a ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Optional[int] , a :int , a :Optional[Any] ) -> int:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.jsonl.tar'''
with tarfile.TarFile(a , '''w''' ) as f:
f.add(a , arcname=os.path.basename(a ) )
f.add(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[str] , a :List[str] , a :str , a :Tuple ) -> Union[str, Any]:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset_nested.jsonl.tar'''
with tarfile.TarFile(a , '''w''' ) as f:
f.add(a , arcname=os.path.join('''nested''' , os.path.basename(a ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[Any] ) -> Tuple:
a = ['''0''', '''1''', '''2''', '''3''']
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset.txt''' )
with open(a , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Any ) -> List[Any]:
a = ['''0''', '''1''', '''2''', '''3''']
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset2.txt''' )
with open(a , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] ) -> List[str]:
a = ['''0''', '''1''', '''2''', '''3''']
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.abc'''
with open(a , '''w''' ) as f:
for item in data:
f.write(item + '''\n''' )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Optional[Any] , a :List[str] , a :int ) -> List[Any]:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.text.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :List[str] , a :int , a :Dict ) -> int:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset_with_dir.text.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.join('''main_dir''' , os.path.basename(a ) ) )
f.write(a , arcname=os.path.join('''main_dir''' , os.path.basename(a ) ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Any , a :Tuple , a :List[str] ) -> Optional[Any]:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.ext.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.basename('''unsupported.ext''' ) )
f.write(a , arcname=os.path.basename('''unsupported_2.ext''' ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Union[str, Any] ) -> Dict:
a = '''\n'''.join(['''First''', '''Second\u2029with Unicode new line''', '''Third'''] )
a = str(tmp_path_factory.mktemp('''data''' ) / '''dataset_with_unicode_new_lines.txt''' )
with open(a , '''w''' , encoding='''utf-8''' ) as f:
f.write(a )
return path
@pytest.fixture(scope='''session''' )
def _a ( ) -> str:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_image_rgb.jpg''' )
@pytest.fixture(scope='''session''' )
def _a ( ) -> Dict:
return os.path.join('''tests''' , '''features''' , '''data''' , '''test_audio_44100.wav''' )
@pytest.fixture(scope='''session''' )
def _a ( a :int , a :int ) -> Optional[int]:
a = tmp_path_factory.mktemp('''data''' ) / '''dataset.img.zip'''
with zipfile.ZipFile(a , '''w''' ) as f:
f.write(a , arcname=os.path.basename(a ) )
f.write(a , arcname=os.path.basename(a ).replace('''.jpg''' , '''2.jpg''' ) )
return path
@pytest.fixture(scope='''session''' )
def _a ( a :Dict ) -> Dict:
a = tmp_path_factory.mktemp('''data_dir''' )
(data_dir / "subdir").mkdir()
with open(data_dir / '''subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden file
with open(data_dir / '''subdir''' / '''.test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / '''.subdir''' / '''train.txt''' , '''w''' ) as f:
f.write('''foo\n''' * 10 )
with open(data_dir / '''.subdir''' / '''test.txt''' , '''w''' ) as f:
f.write('''bar\n''' * 10 )
return data_dir
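
A test consuming one of these session-scoped fixtures requests it by name; for example (a sketch that assumes the upstream fixture name csv_path and the DATA rows above):

import csv

def test_csv_fixture_roundtrip(csv_path):
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert [row["col_1"] for row in rows] == ["0", "1", "2", "3"]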
| 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->Union[str, Any]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->List[str]:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Any ) ->Tuple:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
return 8
@property
def __lowerCAmelCase ( self : Tuple ) ->str:
"""simple docstring"""
a = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def __lowerCAmelCase ( self : Union[str, Any] ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(__UpperCAmelCase )
@property
def __lowerCAmelCase ( self : Dict ) ->Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
a = PriorTransformer(**__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
a = ShapERenderer(**__UpperCAmelCase )
return model
def __lowerCAmelCase ( self : List[Any] ) ->Any:
"""simple docstring"""
a = self.dummy_prior
a = self.dummy_text_encoder
a = self.dummy_tokenizer
a = self.dummy_renderer
a = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1_024 , prediction_type='''sample''' , use_karras_sigmas=__UpperCAmelCase , clip_sample=__UpperCAmelCase , clip_sample_range=1.0 , )
a = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def __lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=0 ) ->Optional[int]:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images[0]
a = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
a = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def __lowerCAmelCase ( self : Dict ) ->Optional[Any]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Optional[Any] ) ->Tuple:
"""simple docstring"""
a = torch_device == '''cpu'''
a = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=__UpperCAmelCase , relax_max_difference=__UpperCAmelCase , )
def __lowerCAmelCase ( self : str ) ->Optional[int]:
"""simple docstring"""
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = 1
a = 2
a = self.get_dummy_inputs(__UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
a = batch_size * [inputs[key]]
a = pipe(**__UpperCAmelCase , num_images_per_prompt=__UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
a = ShapEPipeline.from_pretrained('''openai/shap-e''' )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(0 )
a = pipe(
'''a shark''' , generator=__UpperCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 0 | 1 |
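
The device-dependent seeding used in the dummy-input helpers above generalizes to a small utility (a sketch; older torch builds have no per-device generator on MPS, hence the fallback):

import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global CPU generator as the MPS fallback
    return torch.Generator(device=device).manual_seed(seed)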
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
| 354 |
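
All four full-loop tests above exercise the same diffusers scheduler contract; the denoising loop can be factored into one sketch:

import torch

def run_denoising_loop(scheduler, model, sample):
    """scale_model_input -> model -> step, carrying prev_sample forward."""
    for t in scheduler.timesteps:
        scaled = scheduler.scale_model_input(sample, t)
        model_output = model(scaled, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample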
def greatest_common_divisor(x: int , y: int ) -> int:
    """Euclid's algorithm: greatest common divisor of two integers."""
    return x if y == 0 else greatest_common_divisor(y , x % y )
def lcm(x: int , y: int ) -> int:
    """Least common multiple, using lcm(x, y) * gcd(x, y) == x * y."""
    return (x * y) // greatest_common_divisor(x , y )
def solution(n: int = 20 ) -> int:
    """Smallest positive number evenly divisible by every integer from 1 to n (Project Euler 5)."""
    g = 1
    for i in range(1 , n + 1 ):
        g = lcm(g , i )
    return g
if __name__ == "__main__":
    print(f'''{solution() = }''') | 267 | 0 |
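A quick sanity check for the row above (my own example, not part of the dataset row): folding `lcm` over 1..10 gives 2520, the value quoted in the Project Euler 5 statement, and the n = 20 answer is 232792560.

```python
# Assumes the greatest_common_divisor / lcm / solution definitions above.
assert solution(10) == 2520       # smallest number divisible by all of 1..10
assert solution() == 232792560    # the Project Euler 5 answer for n = 20
```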
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_fnet'] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 16 |
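The row above is the standard `transformers` lazy-`__init__` pattern: submodule imports are deferred until an attribute is first touched. A minimal sketch of the same idea using PEP 562's module-level `__getattr__` (illustrative only; the package name and layout are hypothetical, and `_LazyModule` itself does more, such as caching and error handling):

```python
# lazy_pkg/__init__.py -- hypothetical package layout for illustration
import importlib

_import_structure = {"configuration_fnet": ["FNetConfig"]}

def __getattr__(name):
    # Resolve e.g. `lazy_pkg.FNetConfig` by importing its submodule on first access.
    for submodule, exports in _import_structure.items():
        if name in exports:
            return getattr(importlib.import_module(f".{submodule}", __name__), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```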
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , attention_head_dim=(2, 4) , use_linear_projection=True , addition_embed_type='''text_time''' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=80 , cross_attention_dim=64 , )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , steps_offset=1 , beta_schedule='''scaled_linear''' , timestep_spacing='''leading''' , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=32 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config )
        tokenizer_2 = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' , local_files_only=False )
        components = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''vae''': vae,
            '''text_encoder''': text_encoder,
            '''tokenizer''': tokenizer,
            '''text_encoder_2''': text_encoder_2,
            '''tokenizer_2''': tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        image = image / 2 + 0.5
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''guidance_scale''': 5.0,
            '''output_type''': '''numpy''',
            '''strength''': 0.7_5,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self ):
        device = '''cpu'''  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(device )
        sd_pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = sd_pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_6_5_6, 0.4_8_4_0, 0.4_4_3_9, 0.6_6_9_8, 0.5_5_7_4, 0.4_5_2_4, 0.5_7_9_9, 0.5_9_4_3, 0.5_1_6_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
    def test_attention_slicing_forward_pass(self ):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3 )
    def test_inference_batch_single_identical(self ):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
    def test_save_load_optional_components(self ):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self ):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe = sd_pipe.to(torch_device )
        sd_pipe.set_progress_bar_config(disable=None )
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        inputs['''negative_prompt'''] = negative_prompt
        inputs['''prompt'''] = 3 * [inputs['''prompt''']]
        output = sd_pipe(**inputs )
        image_slice_1 = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device )
        negative_prompt = 3 * ['''this is a negative prompt''']
        prompt = 3 * [inputs.pop('''prompt''' )]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt , negative_prompt=negative_prompt )
        output = sd_pipe(
            **inputs , prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , pooled_prompt_embeds=pooled_prompt_embeds , negative_pooled_prompt_embeds=negative_pooled_prompt_embeds , )
        image_slice_2 = output.images[0, -3:, -3:, -1]
        # make sure the two passes produce the same image
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten() ).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase ):
"""simple docstring"""
    def tearDown(self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self , device , generator_device='''cpu''' , dtype=torch.float32 , seed=0 ):
        generator = torch.Generator(device=generator_device ).manual_seed(seed )
        latents = np.random.RandomState(seed ).standard_normal((1, 4, 64, 64) )
        latents = torch.from_numpy(latents ).to(device=device , dtype=dtype )
        inputs = {
            '''prompt''': '''a photograph of an astronaut riding a horse''',
            '''latents''': latents,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_stable_diffusion_default_ddim(self ):
        pipe = DiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-base''' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_inputs(torch_device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_12, 5_12, 3)
        expected_slice = np.array([0.4_9_4_9_3, 0.4_7_8_9_6, 0.4_0_7_9_8, 0.5_4_2_1_4, 0.5_3_2_1_2, 0.4_8_2_0_2, 0.4_7_6_5_6, 0.4_6_3_2_9, 0.4_8_5_0_6] )
        assert np.abs(image_slice - expected_slice ).max() < 7e-3
| 174 | 0 |
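The `get_dummy_inputs` pattern above branches on MPS because device-bound generators were not reliably supported there. A small helper capturing that convention (a sketch under that assumption, not part of the diffusers API):

```python
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    """Seeded generator; falls back to the global CPU generator on MPS,
    mirroring the branch used in the dummy-input helpers above."""
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default torch.Generator
    return torch.Generator(device=device).manual_seed(seed)
```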
'''simple docstring'''
TEXT_TO_IMAGE_PARAMS = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])
IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(['''image'''])
IMAGE_VARIATION_PARAMS = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''image'''])
TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
IMAGE_INPAINTING_PARAMS = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''image''', '''mask_image'''])
IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(['''example_image''', '''image''', '''mask_image'''])
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(['''class_labels'''])
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(['''class_labels'''])
UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])
UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(['''batch_size'''])
UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])
TEXT_TO_AUDIO_PARAMS = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(['''prompt''', '''negative_prompt'''])
TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(['''input_tokens'''])
TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(['''input_tokens'''])
| 142 |
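These frozensets are consumed by the shared pipeline test mixins to check that each pipeline's `__call__` accepts the expected arguments. A minimal sketch of that kind of check (illustrative only; the helper name is mine and the real `PipelineTesterMixin` logic is more involved):

```python
import inspect

def missing_required_params(pipeline_class, required_params: frozenset) -> set:
    """Return required call parameters the pipeline's __call__ does not accept."""
    accepted = set(inspect.signature(pipeline_class.__call__).parameters)
    return set(required_params) - accepted
```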
'''simple docstring'''
def twos_complement(number: int ) -> str:
    """Return the two's-complement binary string of a non-positive integer."""
    if number > 0:
        raise ValueError("input must be a negative integer" )
    binary_number_length = len(bin(number )[3:] )
    twos_complement_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 142 | 1 |
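Worked example for the row above (my own check): for -5 the magnitude field is 3 bits wide, abs(-5) - 2^3 = -3 contributes the low bits `11`, and the prepended sign bit yields the familiar 4-bit pattern.

```python
# Assumes the twos_complement definition above.
assert twos_complement(-5) == "0b1011"   # -5 in 4-bit two's complement
assert twos_complement(-1) == "0b11"     # -1: sign bit plus one magnitude bit
assert twos_complement(0) == "0b0"
```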
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250
def main():
    """Generate NUMBER_IMAGES mosaic images (and YOLO-style label files)."""
    img_paths, annos = get_dataset(LABEL_DIR , IMG_DIR )
    for index in range(NUMBER_IMAGES ):
        idxs = random.sample(range(len(annos ) ) , 4 )
        new_image, new_annos, path = update_image_and_anno(
            img_paths , annos , idxs , OUTPUT_SIZE , SCALE_RANGE , filter_scale=FILTER_TINY_SCALE , )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32 )
        file_name = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
        file_root = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
        cv2.imwrite(f'''{file_root}.jpg''' , new_image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
        print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
            annos_list.append(obj )
        with open(f'''{file_root}.txt''' , 'w' ) as outfile:
            outfile.write('\n'.join(line for line in annos_list ) )
def get_dataset(label_dir , img_dir ):
    """Read YOLO-style label files and return matching image paths and boxes."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir , '*.txt' ) ):
        label_name = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
        with open(label_file ) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir , f'''{label_name}.jpg''' )
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('\n' ).split(' ' )
            xmin = float(obj[1] ) - float(obj[3] ) / 2
            ymin = float(obj[2] ) - float(obj[4] ) / 2
            xmax = float(obj[1] ) + float(obj[3] ) / 2
            ymax = float(obj[2] ) + float(obj[4] ) / 2
            boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
        if not boxes:
            continue
        img_paths.append(img_path )
        labels.append(boxes )
    return img_paths, labels
def update_image_and_anno(all_img_list , all_annos , idxs , output_size , scale_range , filter_scale = 0.0 , ):
    output_img = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uint8 )
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1] )
    divid_point_y = int(scale_y * output_size[0] )
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs ):
        path = all_img_list[index]
        path_list.append(path )
        img_annos = all_annos[index]
        img = cv2.imread(path )
        if i == 0:  # top-left
            img = cv2.resize(img , (divid_point_x, divid_point_y) )
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 1:  # top-right
            img = cv2.resize(img , (output_size[1] - divid_point_x, divid_point_y) )
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        elif i == 2:  # bottom-left
            img = cv2.resize(img , (divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
        else:  # bottom-right
            img = cv2.resize(
                img , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]
def random_chars(number_char ):
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""") | 96 |
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=False , vocab_size=19 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={'''trunk''': {'''num_blocks''': 2}, '''fp16_esm''': False} , )
        return config
    def create_and_check_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = EsmForProteinFolding(config=config ).float()
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
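        # Shape convention (my hedged reading of the asserts above): positions are
        # (structure-module iterations, batch, seq_len, 14 atoms, xyz) in the atom14
        # layout, and angles are (iterations, batch, seq_len, 7 torsion angles, [sin, cos]).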
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self ):
        self.model_tester = EsmFoldModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
@unittest.skip('''Does not support attention outputs''' )
def UpperCAmelCase__ ( self : Dict ) -> Dict:
"""simple docstring"""
pass
@unittest.skip
def UpperCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''Esm does not support embedding resizing''' )
def UpperCAmelCase__ ( self : Tuple ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support passing input embeds!''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support head pruning.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''ESMfold does not output hidden states in the normal way.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''ESMFold only has one output format.''' )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''This test doesn\'t work for ESMFold and doesn\'t test core functionality''' )
def UpperCAmelCase__ ( self : Any ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold does not support input chunking.''' )
def UpperCAmelCase__ ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.''' )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : str ) -> Dict:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support torchscript compilation.''' )
def UpperCAmelCase__ ( self : Any ) -> Any:
"""simple docstring"""
pass
@unittest.skip('''ESMFold doesn\'t support data parallel.''' )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCAmelCase__ ( self : Tuple ) -> int:
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus ):
@slow
    def test_inference_protein_folding(self ):
        model = EsmForProteinFolding.from_pretrained('''facebook/esmfold_v1''' ).float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
        position_outputs = model(input_ids )['''positions''']
        expected_slice = torch.tensor([2.5_8_2_8, 0.7_9_9_3, -1_0.9_3_3_4] , dtype=torch.float32 )
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1E-4 ) )
| 125 | 0 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self ):
        return LlamaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = LlamaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = LlamaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(
            input_ids , attention_mask=input_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , use_cache=True , )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(
            next_input_ids , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_hidden_states=True , )["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            """feature-extraction""": LlamaModel,
            """text-classification""": LlamaForSequenceClassification,
            """text-generation""": LlamaForCausalLM,
            """zero-shot""": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self ):
        self.model_tester = LlamaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LlamaConfig , hidden_size=37 )
    def test_config(self ):
        self.config_tester.run_common_tests()
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_llama_sequence_classification_model(self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_single_label(self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """single_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_llama_sequence_classification_model_for_multi_label(self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = """multi_label_classification"""
        input_ids = input_dict["""input_ids"""]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        model = LlamaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=attention_mask , labels=sequence_labels )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" )
    def test_save_load_fast_init_from_base(self ):
        pass
    @parameterized.expand([("""linear""",), ("""dynamic""",)] )
    def test_model_rope_scaling(self , scaling_type ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = LlamaModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1E-5 ) )
@require_torch
class LlamaIntegrationTest(unittest.TestCase ):
    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
    @slow
    def test_model_7b_logits(self ):
        input_ids = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" )
        out = model(torch.tensor([input_ids] ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-6.65_50, -4.12_27, -4.98_59, -3.24_06, 0.82_62, -3.00_33, 1.29_64, -3.36_99]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-12.82_81, -7.44_53, -0.46_39, -8.06_25, -7.25_00, -8.00_00, -6.48_83, -7.76_95, -7.84_38, -7.03_12, -6.21_88, -7.13_28, -1.84_96, 1.99_61, -8.62_50, -6.72_27, -12.82_81, -6.94_92, -7.07_42, -7.78_52, -7.58_20, -7.90_62, -6.93_75, -7.98_05, -8.34_38, -8.15_62, -8.04_69, -7.62_50, -7.74_22, -7.33_98,] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
    @slow
    def test_model_13b_logits(self ):
        input_ids = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-2.06_22, -1.27_94, -1.16_38, -0.97_88, -1.46_03, -1.02_38, -1.78_93, -1.44_11]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-8.14_06, -8.05_47, 2.74_61, -1.23_44, -0.14_48, -1.82_62, -1.00_20, -1.81_54, -1.68_95, -1.85_16, -2.35_74, -0.92_77, 3.75_98, 6.57_42, -1.29_98, -0.11_77, -8.14_06, -2.96_88, -2.91_99, -3.16_99, -3.52_54, -2.35_55, -2.79_88, -3.41_41, -2.82_62, -4.51_95, -3.33_79, -3.31_64, -2.78_32, -3.02_73] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("""Logits are not exactly the same, once we fix the instabilities somehow, will update!""" )
    @slow
    def test_model_13bf_logits(self ):
        input_ids = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" )
        out = model(torch.tensor(input_ids ) )
        # Expected mean on dim = -1
        EXPECTED_MEAN = torch.tensor([[-0.85_62, -1.85_20, -0.75_51, -0.41_62, -1.51_61, -1.20_38, -2.48_23, -2.32_54]] )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # slicing logits[0, 0, 0:30]
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-2.22_27, 4.88_28, 0.90_23, -0.45_78, -0.78_71, -0.10_33, -0.62_21, -0.57_86, -0.78_03, -1.06_74, -1.29_20, -0.15_70, 0.80_08, 2.07_23, -0.94_97, 0.27_71, -2.22_27, -0.76_12, -1.43_46, -1.20_61, -1.64_26, -0.30_00, -0.71_39, -1.19_34, -1.86_91, -1.69_73, -1.59_47, -1.27_05, -0.35_23, -0.55_13] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip(
        """Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test""" )
    @slow
    def test_model_70b_logits(self ):
        input_ids = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
        model = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" )
        out = model(torch.tensor(input_ids ) )
        EXPECTED_MEAN = torch.tensor(
            [[-4.23_27, -3.33_60, -4.66_65, -4.76_31, -1.81_80, -3.41_70, -1.42_11, -3.18_10]] , dtype=torch.float32 )
        torch.testing.assert_close(out.mean(-1 ) , EXPECTED_MEAN , atol=1E-2 , rtol=1E-2 )
        # fmt: off
        EXPECTED_SLICE = torch.tensor([-9.49_22, -3.95_51, 1.79_98, -5.67_58, -5.10_55, -5.89_84, -4.83_20, -6.80_86, -6.53_91, -5.61_72, -5.58_20, -5.53_52, 1.78_81, 3.62_89, -6.51_17, -3.47_85, -9.50_00, -6.03_52, -6.81_25, -6.01_95, -6.68_36, -5.47_27, -6.28_12, -6.03_91, -7.33_98, -7.42_97, -7.48_44, -6.58_20, -5.87_89, -5.53_12] )
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30] , EXPECTED_SLICE , atol=1E-5 , rtol=1E-5 )
    @unittest.skip("""Model is currently gated""" )
    @slow
    def test_model_13b_greedy_generation(self ):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        prompt = """Simply put, the theory of relativity states that """
        tokenizer = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" )
        input_ids = tokenizer.encode(prompt , return_tensors="""pt""" )
        model = LlamaForCausalLM.from_pretrained(
            """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=False )
        # greedy generation outputs
        generated_ids = model.generate(input_ids , max_new_tokens=64 , top_p=None , temperature=1 , do_sample=False )
        text = tokenizer.decode(generated_ids[0] , skip_special_tokens=True )
        self.assertEqual(EXPECTED_TEXT_COMPLETION , text )
| 366 |
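The `test_model_rope_scaling` check above exercises "linear" and "dynamic" RoPE scaling. A short sketch of what linear scaling does to the rotary angles (my own illustration, not the transformers implementation):

```python
import torch

def rope_angles(positions: torch.Tensor, dim: int = 8, base: float = 10000.0, factor: float = 1.0) -> torch.Tensor:
    """Rotary angles per position and frequency; linear scaling divides positions by `factor`."""
    inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
    return torch.outer(positions.float() / factor, inv_freq)

# With factor=10, position 100 gets the angles position 10 would have had unscaled.
assert torch.allclose(rope_angles(torch.tensor([100]), factor=10.0), rope_angles(torch.tensor([10])))
```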
'''simple docstring'''
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar('''T''')
class GraphAdjacencyList(Generic[T] ):
    def __init__(self , directed: bool = True ) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self , source_vertex: T , destination_vertex: T ) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex].append(source_vertex )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex )
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex )
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
return self
    def __repr__(self ) -> str:
return pformat(self.adj_list )
| 220 | 0 |
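A quick usage sketch for the class above (my own example): build a small directed graph and inspect the adjacency list. `add_edge` returns `self`, so calls chain.

```python
# Assumes the GraphAdjacencyList definition above.
graph = GraphAdjacencyList(directed=True)
graph.add_edge(0, 1).add_edge(1, 2).add_edge(1, 3)
print(graph)  # {0: [1], 1: [2, 3], 2: [], 3: []}
```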
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int , den: int ) -> bool:
    """True if naively cancelling the shared digit leaves the fraction's value unchanged."""
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int ) -> list[str]:
    """All non-trivial two-digit digit-cancelling fractions, as 'num/den' strings."""
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len )
    for num in range(den , last_digit ):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num , den ):
                    solutions.append(f'''{num}/{den}''' )
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2 ) -> int:
    """Denominator, in lowest terms, of the product of all digit-cancelling fractions (Project Euler 33)."""
    result = 1.0
    for fraction in fraction_list(n ):
        frac = Fraction(fraction )
        result *= frac.denominator / frac.numerator
    return int(result )
if __name__ == "__main__":
print(solution())
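Worked check for the row above (my own example): the four non-trivial digit-cancelling fractions are 16/64, 19/95, 26/65 and 49/98; their product is 1/100, so the answer is 100.

```python
# Assumes the definitions above.
assert fraction_list(2) == ["16/64", "19/95", "26/65", "49/98"]
assert solution() == 100
```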
| 227 | ERROR_MSG = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id(spanish_id: str ) -> bool:
    '''Validate a Spanish DNI: 8 digits plus the control letter from LOOKUP_LETTERS.'''
    if not isinstance(spanish_id , str ):
        msg = f'''Expected string as input, found {type(spanish_id ).__name__}'''
        raise TypeError(msg )
    spanish_id_clean = spanish_id.replace('''-''' , '''''' ).upper()
    if len(spanish_id_clean ) != 9:
        raise ValueError(ERROR_MSG )
    try:
        number = int(spanish_id_clean[0:8] )
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG ) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG )
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 348 | 0 |
"""simple docstring"""
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
snake_case = """\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"""
snake_case = """\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"""
snake_case = """\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n"""
def normalize_answer(text ):
    """Lower text and remove punctuation, articles and extra whitespace."""
    def remove_articles(text ):
        regex = re.compile(R"\b(a|an|the)\b" , re.UNICODE )
        return re.sub(regex , " " , text )
    def white_space_fix(text ):
        return " ".join(text.split() )
    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(text ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(text ) ) ) )
def compute_exact(a_gold , a_pred ):
    """1 if the normalized prediction equals the normalized reference, else 0."""
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em(predictions , references ):
    """Corpus-level exact match: a prediction scores if it matches any of its references."""
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
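# e.g. compute_em(["About 95 you now get in ."], [["About 95 you now get in ."]])
# returns 100.0, while a prediction matching none of its references scores 0.0.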
def SARIngram(sgrams, cgrams, rgramslist, numref):
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)
    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref
    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter
    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgrams) - set(sgrams)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgrams)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
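# Note: SARIngram returns (keep F1, deletion precision, addition F1) for one
# n-gram order; SARIsent below calls it for orders 1-4 and averages the results.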
def SARIsent(ssent, csent, rsents):
    numref = len(rsents)
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Tokenize/normalize a sentence the way the SARI and BLEU tooling expects."""
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari(sources, predictions, references):
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score
def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
| 352 |
def perfect_cube(n: int) -> bool:
    """
    Check whether n is a perfect cube.
    Rounding the cube root is essential: 27 ** (1 / 3) evaluates to
    3.0000000000000004, so a direct equality test fails even for true cubes.
    """
    val = round(abs(n) ** (1 / 3))
    return val * val * val == abs(n)
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
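    # Extra check (illustrative): 343 == 7 ** 3, yet 343 ** (1 / 3) evaluates to
    # 6.999999999999999, so only the rounded cube-root test above reports True.
    print(perfect_cube(343))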
| 319 | 0 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
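# Minimal usage sketch (illustrative; assumes torch is installed):
#   act = get_activation("silu")      # -> nn.SiLU()
#   y = act(torch.randn(2, 3))        # applies x * sigmoid(x) elementwise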
| 99 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : Any =logging.get_logger(__name__)
lowerCAmelCase__ : str ={
'''microsoft/unispeech-sat-base-100h-libri-ft''': (
'''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'''
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : Dict = '''unispeech-sat'''
def __init__( self , _A=32 , _A=768 , _A=12 , _A=12 , _A=3_072 , _A="gelu" , _A=0.1 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.0 , _A=0.1 , _A=0.1 , _A=0.0_2 , _A=1e-5 , _A="group" , _A="gelu" , _A=(512, 512, 512, 512, 512, 512, 512) , _A=(5, 2, 2, 2, 2, 2, 2) , _A=(10, 3, 3, 3, 3, 2, 2) , _A=False , _A=128 , _A=16 , _A=False , _A=True , _A=0.0_5 , _A=10 , _A=2 , _A=0.0 , _A=10 , _A=0 , _A=320 , _A=2 , _A=0.1 , _A=100 , _A=256 , _A=256 , _A=0.1 , _A="mean" , _A=False , _A=False , _A=256 , _A=(512, 512, 512, 512, 1_500) , _A=(5, 3, 3, 1, 1) , _A=(1, 2, 3, 1, 1) , _A=512 , _A=0 , _A=1 , _A=2 , _A=504 , **_A , ):
'''simple docstring'''
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = num_clusters
__SCREAMING_SNAKE_CASE = do_stable_layer_norm
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__SCREAMING_SNAKE_CASE = num_codevectors_per_group
__SCREAMING_SNAKE_CASE = num_codevector_groups
__SCREAMING_SNAKE_CASE = contrastive_logits_temperature
__SCREAMING_SNAKE_CASE = feat_quantizer_dropout
__SCREAMING_SNAKE_CASE = num_negatives
__SCREAMING_SNAKE_CASE = codevector_dim
__SCREAMING_SNAKE_CASE = proj_codevector_dim
__SCREAMING_SNAKE_CASE = diversity_loss_weight
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = list(_A )
__SCREAMING_SNAKE_CASE = xvector_output_dim
@property
def _A ( self ):
'''simple docstring'''
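        # Note (illustrative): with the default conv_stride of (5, 2, 2, 2, 2, 2, 2)
        # this property evaluates to 5 * 2 ** 6 = 320, i.e. one encoder frame per
        # 320 input samples (20 ms of audio at a 16 kHz sampling rate).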
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 257 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")
    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
def __call__( self : str , snake_case__ : int=None , snake_case__ : Optional[int] = None , snake_case__ : Tuple = True , snake_case__ : Tuple = False , snake_case__ : Optional[Any] = None , snake_case__ : str = None , snake_case__ : Tuple = 2_0_4_8 , snake_case__ : Tuple = 0 , snake_case__ : List[str] = None , snake_case__ : Any = None , snake_case__ : Any = False , snake_case__ : Tuple = False , snake_case__ : int = False , snake_case__ : Tuple = False , snake_case__ : str = False , snake_case__ : Dict = True , snake_case__ : Union[str, Any] = None , **snake_case__ : str , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None and not self.image_processor.is_vqa:
UpperCAmelCase__ : Union[str, Any] = self.tokenizer
UpperCAmelCase__ : str = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
UpperCAmelCase__ : Optional[int] = self.image_processor(
_A , return_tensors=_A , max_patches=_A , **_A )
else:
# add pixel_values and bbox
UpperCAmelCase__ : Any = self.image_processor(
_A , return_tensors=_A , max_patches=_A , header_text=_A , **_A )
if text is not None and not self.image_processor.is_vqa:
UpperCAmelCase__ : str = self.tokenizer(
text=_A , add_special_tokens=_A , padding=_A , truncation=_A , max_length=_A , stride=_A , pad_to_multiple_of=_A , return_attention_mask=_A , return_overflowing_tokens=_A , return_special_tokens_mask=_A , return_offsets_mapping=_A , return_token_type_ids=_A , return_length=_A , verbose=_A , return_tensors=_A , **_A , )
if "attention_mask" in text_encoding:
UpperCAmelCase__ : Optional[int] = text_encoding.pop("attention_mask" )
if "input_ids" in text_encoding:
UpperCAmelCase__ : Optional[int] = text_encoding.pop("input_ids" )
else:
UpperCAmelCase__ : List[str] = None
if text_encoding is not None:
encoding_image_processor.update(_A )
return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
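    # Usage sketch (illustrative; the checkpoint name is an example, not from this file):
    #   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
    #   inputs = processor(images=image, text="What is shown?", return_tensors="pt")
    #   -> image inputs yield `flattened_patches` (plus `attention_mask`); a
    #      text-only call falls through to the tokenizer branch in __call__ above.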
| 364 |
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCAmelCase : Union[str, Any] = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester(unittest.TestCase):
def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0}
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Optional[Any] = num_channels
UpperCAmelCase__ : Any = image_size
UpperCAmelCase__ : int = min_resolution
UpperCAmelCase__ : Tuple = max_resolution
UpperCAmelCase__ : Optional[int] = size
UpperCAmelCase__ : Optional[int] = do_normalize
UpperCAmelCase__ : str = do_convert_rgb
UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6]
UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6}
def __a ( self : str ):
'''simple docstring'''
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __a ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self)
@property
def __a ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image()
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
UpperCAmelCase__ : Dict = 2_0_4_8
UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
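        # The "+ 2" accounts for the row and column index that Pix2Struct prepends
        # to each flattened patch, giving patch_h * patch_w * channels + 2 features.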
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : List[Any] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCAmelCase__ : Optional[int] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(snake_case__ ):
UpperCAmelCase__ : List[Any] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
UpperCAmelCase__ : Optional[Any] = "Hello"
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Dict ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Dict = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : List[str] = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __a ( self : Optional[int] ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
UpperCAmelCase__ : int = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : int = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : str = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def __a ( self : List[str] ):
'''simple docstring'''
        self.image_processor_tester = Pix2StructImageProcessingTester(self, num_channels=4)
        self.expected_encoded_image_num_channels = 3
@property
def __a ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) )
def __a ( self : int ):
'''simple docstring'''
# Initialize image_processor
UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
UpperCAmelCase__ : str = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCAmelCase__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCAmelCase__ : Dict = image_processor(
snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 298 | 0 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = '''https://openaipublic.azureedge.net/jukebox/models/'''
_UpperCamelCase = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")

    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")

    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")

    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
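# Example of the mapping above (illustrative key strings):
#   replace_key("priors.0.y_emb.weight") -> "priors.0.metadata_embedding.weight"
#   replace_key("vqvae.bottleneck.level_blocks.0.k") -> "vqvae.bottleneck.level_blocks.0.codebook"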
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> Union[str, Any]:
__lowerCamelCase : int = {}
import re
__lowerCamelCase : List[Any] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__lowerCamelCase : Optional[Any] = re.compile(
r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__lowerCamelCase : Optional[int] = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__lowerCamelCase : str = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__lowerCamelCase : Tuple = re.compile(
r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__lowerCamelCase : Optional[Any] = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__lowerCamelCase : Optional[Any] = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
__lowerCamelCase : Any = re.compile(
r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__lowerCamelCase : str = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(a__ ):
__lowerCamelCase : Optional[int] = re_encoder_block_conv_in.match(a__ )
__lowerCamelCase : Optional[int] = regex_match.groups()
__lowerCamelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
__lowerCamelCase : str = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__lowerCamelCase : Dict = re_encoder_block_conv_in.sub(a__ ,a__ )
elif re_encoder_block_resnet.fullmatch(a__ ):
__lowerCamelCase : Dict = re_encoder_block_resnet.match(a__ )
__lowerCamelCase : int = regex_match.groups()
__lowerCamelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] )
__lowerCamelCase : List[Any] = {'1': 1, '3': 2}[groups[-2]]
__lowerCamelCase : Dict = F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__lowerCamelCase : Tuple = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__lowerCamelCase : Optional[int] = prefix + resnet_block
__lowerCamelCase : str = re_encoder_block_resnet.sub(a__ ,a__ )
elif re_encoder_block_proj_out.fullmatch(a__ ):
__lowerCamelCase : str = re_encoder_block_proj_out.match(a__ )
__lowerCamelCase : Optional[int] = regex_match.groups()
__lowerCamelCase : List[Any] = F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__lowerCamelCase : Union[str, Any] = re_encoder_block_proj_out.sub(a__ ,a__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(a__ ):
__lowerCamelCase : Union[str, Any] = re_decoder_block_conv_out.match(a__ )
__lowerCamelCase : Optional[int] = regex_match.groups()
__lowerCamelCase : int = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCamelCase : Optional[int] = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__lowerCamelCase : Any = re_decoder_block_conv_out.sub(a__ ,a__ )
elif re_decoder_block_resnet.fullmatch(a__ ):
__lowerCamelCase : Union[str, Any] = re_decoder_block_resnet.match(a__ )
__lowerCamelCase : List[str] = regex_match.groups()
__lowerCamelCase : str = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowerCamelCase : Optional[Any] = {'1': 1, '3': 2}[groups[-2]]
__lowerCamelCase : Dict = F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__lowerCamelCase : Optional[int] = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__lowerCamelCase : Tuple = prefix + resnet_block
__lowerCamelCase : int = re_decoder_block_resnet.sub(a__ ,a__ )
elif re_decoder_block_proj_in.fullmatch(a__ ):
__lowerCamelCase : Optional[Any] = re_decoder_block_proj_in.match(a__ )
__lowerCamelCase : Dict = regex_match.groups()
__lowerCamelCase : Dict = F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__lowerCamelCase : str = re_decoder_block_proj_in.sub(a__ ,a__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(a__ ):
__lowerCamelCase : Optional[Any] = re_prior_cond_conv_out.match(a__ )
__lowerCamelCase : Optional[int] = regex_match.groups()
__lowerCamelCase : Optional[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCamelCase : str = F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__lowerCamelCase : Optional[int] = re_prior_cond_conv_out.sub(a__ ,a__ )
elif re_prior_cond_resnet.fullmatch(a__ ):
__lowerCamelCase : Union[str, Any] = re_prior_cond_resnet.match(a__ )
__lowerCamelCase : str = regex_match.groups()
__lowerCamelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowerCamelCase : str = {'1': 1, '3': 2}[groups[-2]]
__lowerCamelCase : Optional[Any] = F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__lowerCamelCase : Union[str, Any] = F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__lowerCamelCase : List[Any] = prefix + resnet_block
__lowerCamelCase : Optional[Any] = re_prior_cond_resnet.sub(a__ ,a__ )
elif re_prior_cond_proj_in.fullmatch(a__ ):
__lowerCamelCase : Union[str, Any] = re_prior_cond_proj_in.match(a__ )
__lowerCamelCase : List[str] = regex_match.groups()
__lowerCamelCase : str = F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__lowerCamelCase : List[Any] = re_prior_cond_proj_in.sub(a__ ,a__ )
# keep original key
else:
__lowerCamelCase : Tuple = original_key
__lowerCamelCase : Optional[int] = replace_key(a__ )
if F'{key_prefix}.{key}' not in model_state_dict or key is None:
print(F'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
__lowerCamelCase : Dict = model_state_dict[F'{key_prefix}.{key}']
print(F'{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match' )
__lowerCamelCase : int = original_key
__lowerCamelCase : Any = original_key
__lowerCamelCase : Dict = value
return new_dict
@torch.no_grad()
def a_ ( _lowerCAmelCase=None ,_lowerCAmelCase=None ) -> Tuple:
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__lowerCamelCase : Any = requests.get(F'{PREFIX}{file}' ,allow_redirects=a__ )
os.makedirs(F'{pytorch_dump_folder_path}/' ,exist_ok=a__ )
open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ,'wb' ).write(r.content )
__lowerCamelCase : Any = MODEL_MAPPING[model_name.split('/' )[-1]]
__lowerCamelCase : str = JukeboxConfig.from_pretrained(a__ )
__lowerCamelCase : List[Any] = JukeboxModel(a__ )
__lowerCamelCase : Any = []
__lowerCamelCase : List[str] = {}
for i, dict_name in enumerate(a__ ):
__lowerCamelCase : Dict = torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model']
__lowerCamelCase : List[str] = {}
for k in old_dic.keys():
if k.endswith('.b' ):
__lowerCamelCase : int = old_dic[k]
elif k.endswith('.w' ):
__lowerCamelCase : Optional[Any] = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowerCamelCase : int = old_dic[k]
else:
__lowerCamelCase : Tuple = old_dic[k]
__lowerCamelCase : Any = 'vqvae' if i == 0 else F'priors.{3 - i}'
__lowerCamelCase : List[Any] = fix_jukebox_keys(a__ ,model.state_dict() ,a__ ,a__ )
weight_dict.append(a__ )
__lowerCamelCase : List[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(a__ )
for i in range(len(a__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(a__ ).mkdir(exist_ok=a__ )
with open(F'{pytorch_dump_folder_path}/mapping.json' ,'w' ) as txtfile:
json.dump(a__ ,a__ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(a__ )
return weight_dict
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
_UpperCamelCase = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
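# Example invocation (illustrative; the script filename is assumed):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted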
| 208 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ :Any = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Replace the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
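# Worked example of the renaming (illustrative): the original checkpoint key
# "network.0.0.mlp.fc1.weight" becomes
# "poolformer.encoder.block.0.0.output.conv1.weight" after the steps above.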
def lowerCAmelCase__ ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
_UpperCAmelCase = Image.open(requests.get(a__ , stream=a__ ).raw )
return image
@torch.no_grad()
def lowerCAmelCase__ ( a__: Optional[int] , a__: Dict , a__: Any ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = PoolFormerConfig()
# set attributes based on model_name
_UpperCAmelCase = 'huggingface/label-files'
_UpperCAmelCase = model_name[-3:]
_UpperCAmelCase = 1_0_0_0
_UpperCAmelCase = 'imagenet-1k-id2label.json'
_UpperCAmelCase = (1, 1_0_0_0)
# set config attributes
_UpperCAmelCase = json.load(open(hf_hub_download(a__ , a__ , repo_type='dataset' ) , 'r' ) )
_UpperCAmelCase = {int(a__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
if size == "s12":
_UpperCAmelCase = [2, 2, 6, 2]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s24":
_UpperCAmelCase = [4, 4, 1_2, 4]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 0.9
elif size == "s36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [6_4, 1_2_8, 3_2_0, 5_1_2]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.9
elif size == "m36":
_UpperCAmelCase = [6, 6, 1_8, 6]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
elif size == "m48":
_UpperCAmelCase = [8, 8, 2_4, 8]
_UpperCAmelCase = [9_6, 1_9_2, 3_8_4, 7_6_8]
_UpperCAmelCase = 4.0
_UpperCAmelCase = 1e-6
_UpperCAmelCase = 0.95
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
# Prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=a__ , return_tensors='pt' ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
_UpperCAmelCase = torch.load(a__ , map_location=torch.device('cpu' ) )
# rename keys
_UpperCAmelCase = rename_keys(a__ )
# create HuggingFace model and load state dict
_UpperCAmelCase = PoolFormerForImageClassification(a__ )
model.load_state_dict(a__ )
model.eval()
# Define image processor
_UpperCAmelCase = PoolFormerImageProcessor(crop_pct=a__ )
_UpperCAmelCase = image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
_UpperCAmelCase = model(a__ )
_UpperCAmelCase = outputs.logits
# define expected logit slices for different models
if size == "s12":
_UpperCAmelCase = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
_UpperCAmelCase = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
_UpperCAmelCase = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
_UpperCAmelCase = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
_UpperCAmelCase = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(F'''Size {size} not supported''' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , a__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(a__ ).mkdir(exist_ok=a__ )
model.save_pretrained(a__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(a__ )
if __name__ == "__main__":
lowerCAmelCase__ :str = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ :Dict = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 329 | 0 |
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """a1z26 cipher: map each lowercase letter to its position in the alphabet."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode: map alphabet positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main()
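    # Example (illustrative): encode("hello") -> [8, 5, 12, 12, 15]
    #                         decode([8, 5, 12, 12, 15]) -> "hello"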
| 304 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
    """May raise json.decoder.JSONDecodeError if the page layout changes."""
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    """Crawl public profile information for an Instagram user."""

    def __init__(self, username):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        """Return a dict of user information."""
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"

    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
    """Smoke-test the scraper against a known public account."""
    import os

    if os.environ.get("CI"):
        return  # test failing on GitHub Actions
    instagram_user = InstagramUser(username)
    assert instagram_user.user_data
    assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_5_0
assert instagram_user.number_of_followers > 1_2_0_0_0_0
assert instagram_user.number_of_followings > 1_5
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
    instagram_user = InstagramUser("github")
print(instagram_user)
print(F'{instagram_user.number_of_posts = }')
print(F'{instagram_user.number_of_followers = }')
print(F'{instagram_user.number_of_followings = }')
print(F'{instagram_user.email = }')
print(F'{instagram_user.website = }')
print(F'{instagram_user.profile_picture_url = }')
print(F'{instagram_user.is_verified = }')
print(F'{instagram_user.is_private = }')
| 304 | 1 |
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
__UpperCamelCase = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # define 0 * log(0) = 0
    return -plogp.sum(dim=-1)
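# Illustrative: for a uniform attention row p = [0.25, 0.25, 0.25, 0.25],
# entropy(torch.tensor([0.25] * 4)) evaluates to about 1.386 (= ln 4), the
# maximum possible value for a length-4 distribution.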
def print_ad_tensor(tensor):
    """Log a 2D tensor row by row (floats or longs)."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model.config.num_hidden_layers, model.config.num_attention_heads
SCREAMING_SNAKE_CASE = torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
SCREAMING_SNAKE_CASE = torch.zeros(__lowerCamelCase , __lowerCamelCase ).to(args.device )
if head_mask is None:
SCREAMING_SNAKE_CASE = torch.ones(__lowerCamelCase , __lowerCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=__lowerCamelCase )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = 0.0
for step, inputs in enumerate(tqdm(__lowerCamelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
SCREAMING_SNAKE_CASE = tuple(t.to(args.device ) for t in inputs )
((SCREAMING_SNAKE_CASE ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
SCREAMING_SNAKE_CASE = model(__lowerCamelCase , labels=__lowerCamelCase , head_mask=__lowerCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE = entropy(attn.detach() , __lowerCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(__lowerCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = torch.pow(torch.pow(__lowerCamelCase , __lowerCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
SCREAMING_SNAKE_CASE = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(__lowerCamelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(__lowerCamelCase )
logger.info('Head ranked by importance scores' )
SCREAMING_SNAKE_CASE = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
SCREAMING_SNAKE_CASE = torch.arange(
head_importance.numel() , device=args.device )
SCREAMING_SNAKE_CASE = head_ranks.view_as(__lowerCamelCase )
print_ad_tensor(__lowerCamelCase )
return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = compute_heads_importance(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase )
SCREAMING_SNAKE_CASE = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , __lowerCamelCase , original_score * args.masking_threshold )
SCREAMING_SNAKE_CASE = torch.ones_like(__lowerCamelCase )
SCREAMING_SNAKE_CASE = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
SCREAMING_SNAKE_CASE = original_score
while current_score >= original_score * args.masking_threshold:
SCREAMING_SNAKE_CASE = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
SCREAMING_SNAKE_CASE = float('Inf' )
SCREAMING_SNAKE_CASE = head_importance.view(-1 ).sort()[1]
if len(__lowerCamelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
SCREAMING_SNAKE_CASE = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
SCREAMING_SNAKE_CASE = new_head_mask.view(-1 )
SCREAMING_SNAKE_CASE = 0.0
SCREAMING_SNAKE_CASE = new_head_mask.view_as(__lowerCamelCase )
SCREAMING_SNAKE_CASE = new_head_mask.clone().detach()
print_ad_tensor(__lowerCamelCase )
# Compute metric and head importance again
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , head_mask=__lowerCamelCase )
SCREAMING_SNAKE_CASE = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , __lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(__lowerCamelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
SCREAMING_SNAKE_CASE = datetime.now()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase )
SCREAMING_SNAKE_CASE = 1 / loss
SCREAMING_SNAKE_CASE = datetime.now() - before_time
SCREAMING_SNAKE_CASE = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(__lowerCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = [
v,
]
assert sum(len(__lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(__lowerCamelCase )
SCREAMING_SNAKE_CASE = sum(p.numel() for p in model.parameters() )
SCREAMING_SNAKE_CASE = datetime.now()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = compute_heads_importance(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , compute_entropy=__lowerCamelCase , compute_importance=__lowerCamelCase , head_mask=__lowerCamelCase , actually_pruned=__lowerCamelCase , )
SCREAMING_SNAKE_CASE = 1 / loss
SCREAMING_SNAKE_CASE = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , __lowerCamelCase , __lowerCamelCase , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , __lowerCamelCase , __lowerCamelCase )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(__lowerCamelCase , args.output_dir )
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
| 113 |
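The script above drives everything through `compute_heads_importance`; the two primitives it builds on are the `head_mask` forward argument and `PreTrainedModel.prune_heads`. A minimal sketch of just those two calls, assuming a small GPT-2 checkpoint (the model name and token ids are illustrative):

import torch
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("gpt2")  # illustrative checkpoint
n_layers, n_heads = model.config.n_layer, model.config.n_head

# A mask of ones is a no-op; zeroing an entry silences that attention head.
head_mask = torch.ones(n_layers, n_heads)
head_mask[0, 3] = 0.0  # mask head 3 of layer 0

input_ids = torch.tensor([[464, 3290, 318]])  # arbitrary token ids
loss = model(input_ids, labels=input_ids, head_mask=head_mask).loss  # 1 / loss is the script's score

# Pruning removes the masked weights for good: {layer index: [head indices]}.
model.prune_heads({0: [3]})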
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", type=str, default="ubuntu")
    parser.add_argument("--host", type=str, default="localhost")
    parser.add_argument("--key_path", type=str, default=None)
    parser.add_argument("--instance", type=str, default="V100:1")
    parser.add_argument("--provider", type=str, default="cheapest")
    parser.add_argument("--use_spot", type=bool, default=False)
    parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py")
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError("Cannot specify both BYO and on-demand cluster args")
        cluster = rh.cluster(
            name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path}
        )
    else:
        cluster = rh.cluster(
            name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit("/", 1)[0]

    # Set up remote environment
    cluster.install_packages(["pip:./"])  # Installs transformers from local source
    # Note transformers is copied into the home directory on the remote machine, so we can install from there
    cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"])
    cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"])

    # Run example. You can bypass the CLI wrapper and paste your own code here.
    cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 58 | 0 |
"""simple docstring"""
import enum
import shutil
import sys
_A = shutil.get_terminal_size()
_A = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
class _lowercase ( enum.Enum ):
lowercase_ = 0
lowercase_ = 1
def UpperCAmelCase ( a_, a_="" ):
'''simple docstring'''
sys.stdout.write(str(lowerCamelCase__ ) + end )
sys.stdout.flush()
def UpperCAmelCase ( a_, a_, a_="" ):
'''simple docstring'''
forceWrite(F"""\u001b[{color}m{content}\u001b[0m""", lowerCamelCase__ )
def UpperCAmelCase ( ):
'''simple docstring'''
forceWrite('\r' )
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )
def UpperCAmelCase ( ):
'''simple docstring'''
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def UpperCAmelCase ( ):
'''simple docstring'''
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH )
| 350 |
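A short usage sketch for the helpers above; nothing beyond the standard ANSI escape sequences already used in the module is assumed:

forceWrite("  option A", end="\n")
forceWrite("  option B", end="\n")
move_cursor(2, "UP")          # jump back up onto the "option A" line
clear_line()                  # blank it out
writeColor("> option A", 32)  # redraw it highlighted in green (SGR code 32)
forceWrite("", end="\n\n")
linebreak()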
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _lowercase ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
lowercase_ = [('size', ctypes.c_int), ('visible', ctypes.c_byte)]
def UpperCAmelCase ( ):
'''simple docstring'''
if os.name == "nt":
lowerCamelCase : Optional[int] = CursorInfo()
lowerCamelCase : Union[str, Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(a_, ctypes.byref(a_ ) )
lowerCamelCase : Dict = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(a_, ctypes.byref(a_ ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def UpperCAmelCase ( ):
'''simple docstring'''
if os.name == "nt":
lowerCamelCase : List[str] = CursorInfo()
lowerCamelCase : List[Any] = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(a_, ctypes.byref(a_ ) )
lowerCamelCase : Optional[Any] = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(a_, ctypes.byref(a_ ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def UpperCAmelCase ( ):
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor()
| 205 | 0 |
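Usage sketch for the context manager above: the `finally` clause guarantees the cursor comes back even if the body raises.

import time

with hide():
    for step in range(3):
        print(f"working... step {step}")
        time.sleep(0.2)
# the cursor is visible again here, error or not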
import os
def solution(filename: str = "matrix.txt") -> int:
    """Return the minimal path sum from the top left to the bottom right of the grid in
    `filename`, moving only right and down (Project Euler problem 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]

    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 92 |
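A worked mini-example of the same right/down dynamic programme, small enough to check by hand (the 3x3 grid is illustrative, not from the Project Euler data file):

from typing import List


def min_path_sum(grid: List[List[int]]) -> int:
    # dp[i][j] = grid[i][j] + min(cost from above, cost from the left)
    dp = [row[:] for row in grid]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            if i == 0 and j == 0:
                continue
            candidates = []
            if i > 0:
                candidates.append(dp[i - 1][j])
            if j > 0:
                candidates.append(dp[i][j - 1])
            dp[i][j] += min(candidates)
    return dp[-1][-1]


# dp ends up as [[1, 4, 5], [2, 7, 6], [6, 8, 7]]: the cheapest path 1-3-1-1-1 costs 7.
assert min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7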
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 92 | 1 |
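Assuming a recent transformers release, the `attribute_map` above is what lets GPT-2-style attribute names resolve to the decision-transformer fields at runtime:

from transformers import DecisionTransformerConfig

config = DecisionTransformerConfig(state_dim=11, act_dim=3)
# Aliases resolve through attribute_map to the underlying fields:
assert config.max_position_embeddings == config.n_positions
assert config.num_hidden_layers == config.n_layer
assert config.num_attention_heads == config.n_head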
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''imagefolder''', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('''audiofolder''', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_MODULE_SUPPORTS_METADATA = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('''.zip''')
_MODULE_TO_EXTENSIONS["audiofolder"].append('''.zip''')
| 355 |
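The point of stripping comments and blank lines before hashing is that cosmetic edits to a builder module do not invalidate cached datasets. A self-contained sketch of the same idea (`hash_lines` is a stand-in for illustration, not the datasets API):

import re
from hashlib import sha256


def hash_lines(lines):
    kept = [re.sub(r"#.*", "", line) for line in lines]
    return sha256("\n".join(line for line in kept if line).encode("utf-8")).hexdigest()


with_comment = hash_lines(["x = 1", "# purely cosmetic note", "y = 2"])
without_comment = hash_lines(["x = 1", "y = 2"])
assert with_comment == without_comment  # comments do not change the cache key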
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SummarizationDataProcessingTest(unittest.TestCase):
    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if the sequence is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Do nothing if the sequence is the right size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is too long."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """Processing a story with no highlights returns an empty list for the summary."""
        raw_story = """It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this."""
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_process_empty_story(self):
        """An empty story returns an empty collection of lines."""
        raw_story = ""
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_process_story_with_missing_period(self):
        raw_story = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        story_lines, summary_lines = process_story(raw_story)

        expected_story_lines = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(expected_story_lines, story_lines)

        expected_summary_lines = ["It was the best of times."]
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])

        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 7 | 0 |
"""
Given a matrix in which every row and every column is sorted in decreasing order,
count the number of negative values (LeetCode 1351), comparing a binary-search
solution against two brute-force baselines.
"""


def generate_large_matrix() -> list[list[int]]:
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Validate that the rows and columns of the grid are sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Binary-search the index of the first negative number in a decreasing row."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1

    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Benchmark the three implementations against each other."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 47 |
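A quick hand check of the counters on the first small test grid, assuming the functions above are in scope. Because both rows and columns decrease, each row's first-negative index can only move left as the scan walks down the grid, which is why the binary-search window keeps shrinking:

small = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
validate_grid(small)
assert count_negatives_binary_search(small) == 8
assert count_negatives_brute_force(small) == 8
assert count_negatives_brute_force_with_break(small) == 8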
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()

"""
oe_process is run by each worker process:
position = the index in the list this process represents, used to know which
           neighbor we pass our value to
value    = the initial value at list[position]
l_send, r_send = the pipes used to send to the left and right neighbors
lr_cv, rr_cv   = the pipes used to receive from the left and right neighbors
result_pipe    = the pipe used to send the final value back to main
"""


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 274 | 0 |
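The pipe-and-lock machinery above can obscure the algorithm itself, so here is a sequential sketch of odd-even transposition sort: n alternating phases of neighbor compare-and-swap, which is exactly what each process pair performs concurrently in the parallel version.

def odd_even_transposition_sequential(arr):
    arr = list(arr)
    n = len(arr)
    for phase in range(n):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, n - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert odd_even_transposition_sequential(list(range(10, 0, -1))) == list(range(1, 11))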
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 368 |
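A minimal stand-in illustrating the lazy-import pattern used above; this is a simplified sketch, not the actual `_LazyModule` implementation, and assumes an `import_structure` dict mapping submodule names to the attributes they export:

import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve attributes to their submodules only on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{self._attr_to_submodule[attr]}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the submodule import happens only once
        return value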
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def snake_case__ ( __lowerCamelCase : jnp.ndarray , __lowerCamelCase : int , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1 , __lowerCamelCase : float = 1.0e4 , __lowerCamelCase : bool = False , __lowerCamelCase : float = 1.0 , ):
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even'''
lowerCamelCase__ : Any =float(embedding_dim // 2 )
lowerCamelCase__ : List[str] =math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
lowerCamelCase__ : int =min_timescale * jnp.exp(jnp.arange(__lowerCamelCase , dtype=jnp.floataa ) * -log_timescale_increment )
lowerCamelCase__ : Tuple =jnp.expand_dims(__lowerCamelCase , 1 ) * jnp.expand_dims(__lowerCamelCase , 0 )
# scale embeddings
lowerCamelCase__ : List[str] =scale * emb
if flip_sin_to_cos:
lowerCamelCase__ : int =jnp.concatenate([jnp.cos(__lowerCamelCase ), jnp.sin(__lowerCamelCase )] , axis=1 )
else:
lowerCamelCase__ : List[str] =jnp.concatenate([jnp.sin(__lowerCamelCase ), jnp.cos(__lowerCamelCase )] , axis=1 )
lowerCamelCase__ : str =jnp.reshape(__lowerCamelCase , [jnp.shape(__lowerCamelCase )[0], embedding_dim] )
return signal
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = jnp.floataa
@nn.compact
def __call__( self : Optional[Any], lowerCamelCase : int )-> Any:
lowerCamelCase__ : Optional[Any] =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_1''' )(lowerCamelCase )
lowerCamelCase__ : List[str] =nn.silu(lowerCamelCase )
lowerCamelCase__ : Any =nn.Dense(self.time_embed_dim, dtype=self.dtype, name='''linear_2''' )(lowerCamelCase )
return temb
class __SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
_a = 3_2
_a = False
_a = 1
@nn.compact
def __call__( self : Any, lowerCamelCase : int )-> int:
return get_sinusoidal_embeddings(
lowerCamelCase, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift )
| 272 | 0 |
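For reference, the frequencies built above: with h = embedding_dim / 2, timescale i gets angular frequency omega_i = min_timescale * exp(-i * log(max_timescale / min_timescale) / (h - freq_shift)), and the embedding of timestep t concatenates sin(t * omega) with cos(t * omega). A NumPy sketch of the default path (min_timescale = 1, scale = 1), written as a check rather than an exact reimplementation:

import numpy as np


def sinusoidal_np(timesteps, embedding_dim, freq_shift=1.0, max_timescale=1.0e4):
    half = embedding_dim // 2
    omega = np.exp(-np.log(max_timescale) * np.arange(half) / (half - freq_shift))
    angles = timesteps[:, None] * omega[None, :]
    return np.concatenate([np.sin(angles), np.cos(angles)], axis=1)


print(sinusoidal_np(np.array([0.0, 1.0, 2.0]), 8).shape)  # (3, 8)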
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
logger = logging.get_logger(__name__)


class PoolFormerFeatureExtractor(PoolFormerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 309 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 5_12,
"""YituTech/conv-bert-medium-small""": 5_12,
"""YituTech/conv-bert-small""": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 309 | 1 |
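The segment-id layout produced by create_token_type_ids_from_sequences above, as a tiny standalone check: [CLS], the first sequence, and its [SEP] belong to segment 0; the second sequence and its [SEP] get segment 1 (`segment_ids` here is an illustrative helper, not part of the tokenizer API):

def segment_ids(len_a, len_b=None):
    first = [0] * (1 + len_a + 1)  # [CLS] + tokens_a + [SEP]
    if len_b is None:
        return first
    return first + [1] * (len_b + 1)  # tokens_b + [SEP]


assert segment_ids(3) == [0, 0, 0, 0, 0]
assert segment_ids(2, 3) == [0, 0, 0, 0, 1, 1, 1, 1]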
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 298 |
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class lowerCAmelCase__ :
def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : List[str] = 1_0_0
UpperCAmelCase__ : List[Any] = batch_size
UpperCAmelCase__ : int = image_size
UpperCAmelCase__ : List[Any] = patch_size
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : Any = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : int = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Any = hidden_act
UpperCAmelCase__ : Optional[int] = hidden_dropout_prob
UpperCAmelCase__ : str = attention_probs_dropout_prob
UpperCAmelCase__ : Optional[int] = type_sequence_label_size
UpperCAmelCase__ : Any = initializer_range
UpperCAmelCase__ : Any = scope
UpperCAmelCase__ : Optional[Any] = out_indices
UpperCAmelCase__ : int = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2
UpperCAmelCase__ : Optional[int] = num_patches + 1
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : str = None
UpperCAmelCase__ : Optional[int] = None
if self.use_labels:
UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase__ : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def __a ( self : int ):
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Dict = model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ):
'''simple docstring'''
UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : List[Any] = model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.type_sequence_label_size
UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.num_labels
UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
UpperCAmelCase__ : int = model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def __a ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs()
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs
UpperCAmelCase__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ =(
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ =(
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
SCREAMING_SNAKE_CASE_ =False
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = BeitModelTester(self )
UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def __a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="BEiT does not use inputs_embeds" )
def __a ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def __a ( self : List[str] ):
'''simple docstring'''
pass
def __a ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Dict = model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(snake_case__ )
UpperCAmelCase__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase__ : str = [*signature.parameters.keys()]
UpperCAmelCase__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def __a ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def __a ( self : List[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ )
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss
loss.backward()
def __a ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
UpperCAmelCase__ : List[Any] = model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss
loss.backward()
def __a ( self : str ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
UpperCAmelCase__ : int = model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __a ( self : Any ):
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None
@slow
def test_inference_masked_image_modeling_head(self):
'''simple docstring'''
model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
pixel_values = image_processor(images=image , return_tensors="pt" ).pixel_values.to(torch_device )
# prepare bool_masked_pos
bool_masked_pos = torch.ones((1, 196) , dtype=torch.bool ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(pixel_values=pixel_values , bool_masked_pos=bool_masked_pos )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(torch_device )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , expected_slice , atol=1e-2 ) )
@slow
def test_inference_image_classification_head_imagenet_1k(self):
'''simple docstring'''
model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 1000) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108] ).to(torch_device )
self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
expected_class_idx = 281
self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def test_inference_image_classification_head_imagenet_22k(self):
'''simple docstring'''
model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to(
torch_device )
image_processor = self.default_image_processor
image = prepare_img()
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 21841) )
self.assertEqual(logits.shape , expected_shape )
expected_slice = torch.tensor([1.6881, -0.2787, 0.5901] ).to(torch_device )
self.assertTrue(torch.allclose(logits[0, :3] , expected_slice , atol=1e-4 ) )
expected_class_idx = 2396
self.assertEqual(logits.argmax(-1 ).item() , expected_class_idx )
@slow
def test_inference_semantic_segmentation(self):
'''simple docstring'''
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
model = model.to(torch_device )
image_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
image = Image.open(ds[0]["file"] )
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
logits = outputs.logits
# verify the logits
expected_shape = torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , expected_shape )
is_pillow_less_than_9 = version.parse(PIL.__version__ ) < version.parse("9.0.0" )
if is_pillow_less_than_9:
expected_slice = torch.tensor(
[
[[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
[[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
[[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
] , device=torch_device , )
else:
expected_slice = torch.tensor(
[
[[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
[[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
[[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
] , device=torch_device , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
def test_post_processing_semantic_segmentation(self):
'''simple docstring'''
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
model = model.to(torch_device )
image_processor = BeitImageProcessor(do_resize=True , size=640 , do_center_crop=False )
ds = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
image = Image.open(ds[0]["file"] )
inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
# forward pass
with torch.no_grad():
outputs = model(**inputs )
outputs.logits = outputs.logits.detach().cpu()
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(500, 300)] )
expected_shape = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , expected_shape )
segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
expected_shape = torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , expected_shape )
| 298 | 1 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest ):
'''simple docstring'''
scheduler_classes = (DDPMScheduler,)
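# The check_over_configs() calls below come from the SchedulerCommonTest mixin:
# it rebuilds the scheduler from get_scheduler_config() plus the given overrides
# and verifies that step() output survives a config save/load round trip.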
def get_scheduler_config( self , **kwargs ):
'''simple docstring'''
config = {
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''variance_type''': '''fixed_small''',
'''clip_sample''': True,
}
config.update(**kwargs )
return config
def test_timesteps(self ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=timesteps )
def test_betas(self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
def test_schedules(self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule )
def test_variance_type(self ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=variance )
def test_clip_sample(self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample )
def test_thresholding(self ):
'''simple docstring'''
self.check_over_configs(thresholding=False )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
def test_prediction_type(self ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type )
def test_time_indices(self ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=t )
def test_variance(self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
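# For the "fixed_small" variance type these values should follow the DDPM
# posterior variance: beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t.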
assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
def test_full_loop_no_noise(self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
# if t > 0:
#     noise = self.dummy_sample_deter
#     variance = scheduler.get_variance(t) ** (0.5) * noise
#
#     sample = pred_prev_sample + variance
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.9606) < 1e-2
assert abs(result_mean.item() - 0.3372) < 1e-3
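# The variant below repeats the same denoising loop with prediction_type
# "v_prediction", where the model output is read as the velocity
# v = alpha_t * eps - sigma_t * x instead of the noise eps.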
def test_full_loop_with_v_prediction(self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(prediction_type='''v_prediction''' )
scheduler = scheduler_class(**scheduler_config )
num_trained_timesteps = len(scheduler )
model = self.dummy_model()
sample = self.dummy_sample_deter
generator = torch.manual_seed(0 )
for t in reversed(range(num_trained_timesteps ) ):
# 1. predict noise residual
residual = model(sample , t )
# 2. predict previous mean of sample x_t-1
pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
# if t > 0:
#     noise = self.dummy_sample_deter
#     variance = scheduler.get_variance(t) ** (0.5) * noise
#
#     sample = pred_prev_sample + variance
sample = pred_prev_sample
result_sum = torch.sum(torch.abs(sample ) )
result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 202.0296) < 1e-2
assert abs(result_mean.item() - 0.2631) < 1e-3
def test_custom_timesteps(self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=timesteps )
scheduler_timesteps = scheduler.timesteps
for i, timestep in enumerate(scheduler_timesteps ):
if i == len(timesteps ) - 1:
expected_prev_t = -1
else:
expected_prev_t = timesteps[i + 1]
prev_t = scheduler.previous_timestep(timestep )
prev_t = prev_t.item()
self.assertEqual(prev_t , expected_prev_t )
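# The three tests below check that set_timesteps() rejects invalid custom
# schedules: non-descending timesteps, passing both num_inference_steps and
# explicit timesteps, and timesteps outside the trained range.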
def test_custom_timesteps_increasing_order(self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 51, 0]
with self.assertRaises(ValueError , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=timesteps )
def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [100, 87, 50, 1, 0]
num_inference_steps = len(timesteps )
with self.assertRaises(ValueError , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def test_custom_timesteps_too_large(self ):
'''simple docstring'''
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config()
scheduler = scheduler_class(**scheduler_config )
timesteps = [scheduler.config.num_train_timesteps]
with self.assertRaises(
ValueError , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=timesteps )
| 14 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol: str = "AAPL" ) -> str:
"""simple docstring"""
url = F"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
return soup.find("div" , class_=class_ ).find("span" ).text
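# Note: this scrapes a hard-coded CSS class out of Yahoo Finance's HTML, so it
# will silently break whenever the page markup changes.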
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
| 340 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_4bit_bnb_available,
is_8bit_bnb_available,
is_aim_available,
is_bf16_available,
is_bnb_available,
is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fp32,
convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
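# The DeepSpeed wrappers below are only imported when the deepspeed backend is
# actually installed; everything after that block is imported unconditionally.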
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 353 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
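# _import_structure maps submodule names to their public symbols; _LazyModule
# (at the bottom of this file) defers the real imports until first attribute access.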
_import_structure = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""feature_extraction_deit"""] = ["""DeiTFeatureExtractor"""]
_import_structure["""image_processing_deit"""] = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_deit"""] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_tf_deit"""] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 229 | 0 |
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject ):
_backends = ['''onnx''']
def __init__( self : str , *A_ : Dict , **A_ : Union[str, Any]):
requires_backends(self , ['''onnx'''])
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *A_ : List[str] , **A_ : Optional[Any]):
requires_backends(cls , ['''onnx'''])
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A_ : Dict , **A_ : List[str]):
requires_backends(cls , ['''onnx'''])
| 103 |
'''simple docstring'''
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''Hello, World!'''
SAMPLE_LANGUAGE = '''en_XX'''
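# SAMPLE_TEXT / SAMPLE_LANGUAGE feed the parity check at the end of the
# conversion, where the HF and fairseq models must produce matching outputs.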
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path, pytorch_dump_folder_path, classification_head ):
data_dir = Path("data_bin" )
xmod = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name="xmod_base" , arch="xmod_base" , task="multilingual_masked_lm" , data_name_or_path=str(data_dir ) , bpe="sentencepiece" , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / "sentencepiece.bpe.model" ) , src_dict=str(data_dir / "dict.txt" ) , )
xmod.eval() # disable dropout
print(xmod )
xmod_sent_encoder = xmod.model.encoder.sentence_encoder
config = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , "bottleneck" , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
config.num_labels = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print("Our X-MOD config:" , config )
model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
model.eval()
# Now let's copy all the weights.
# Embeddings
model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
layer = model.roberta.encoder.layer[i]
xmod_layer = xmod_sent_encoder.layers[i]
# self attention
self_attn = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError("Dimensions of self-attention weights do not match." )
self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
# self-attention output
self_output = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError("Dimensions of self-attention output weights do not match." )
self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
# intermediate
intermediate = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
raise AssertionError("Dimensions of intermediate weights do not match." )
intermediate.dense.weight = xmod_layer.fc1.weight
intermediate.dense.bias = xmod_layer.fc1.bias
# output
bert_output = layer.output
if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
raise AssertionError("Dimensions of feed-forward weights do not match." )
bert_output.dense.weight = xmod_layer.fc2.weight
bert_output.dense.bias = xmod_layer.fc2.bias
bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError("Lists of language adapters do not match." )
for lang_code, adapter in xmod_layer.adapter_modules.items():
to_adapter = bert_output.adapter_modules[lang_code]
from_adapter = xmod_layer.adapter_modules[lang_code]
to_adapter.dense1.weight = from_adapter.fc1.weight
to_adapter.dense1.bias = from_adapter.fc1.bias
to_adapter.dense2.weight = from_adapter.fc2.weight
to_adapter.dense2.bias = from_adapter.fc2.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
if classification_head:
model.classifier.dense.weight = xmod.model.classification_heads["mnli"].dense.weight
model.classifier.dense.bias = xmod.model.classification_heads["mnli"].dense.bias
model.classifier.out_proj.weight = xmod.model.classification_heads["mnli"].out_proj.weight
model.classifier.out_proj.bias = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(SAMPLE_LANGUAGE )
our_output = model(input_ids )[0]
if classification_head:
their_output = xmod.model.classification_heads["mnli"](xmod.extract_features(input_ids ) )
else:
their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
success = torch.allclose(our_output , their_output , atol=1E-3 )
print("Do both models output the same tensors?" , "🔥" if success else "💩" )
if not success:
raise Exception("Something went wRoNg" )
Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 298 | 0 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig(PretrainedConfig ):
'''simple docstring'''
model_type = "data2vec-audio"
def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1E-5 , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embedding_groups=16 , conv_pos_kernel_size=19 , num_conv_pos_embeddings=5 , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , ctc_loss_reduction="sum" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
self.hidden_size = hidden_size
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim )
self.conv_stride = list(conv_stride )
self.conv_kernel = list(conv_kernel )
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.conv_pos_kernel_size = conv_pos_kernel_size
self.num_feat_extract_layers = len(self.conv_dim )
self.num_hidden_layers = num_hidden_layers
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.num_attention_heads = num_attention_heads
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.feat_proj_dropout = feat_proj_dropout
self.final_dropout = final_dropout
self.layerdrop = layerdrop
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.vocab_size = vocab_size
self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
# ctc loss
self.ctc_loss_reduction = ctc_loss_reduction
self.ctc_zero_infinity = ctc_zero_infinity
# adapter
self.add_adapter = add_adapter
self.adapter_kernel_size = adapter_kernel_size
self.adapter_stride = adapter_stride
self.num_adapter_layers = num_adapter_layers
self.output_hidden_size = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
self.classifier_proj_size = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
self.tdnn_dim = list(tdnn_dim )
self.tdnn_kernel = list(tdnn_kernel )
self.tdnn_dilation = list(tdnn_dilation )
self.xvector_output_dim = xvector_output_dim
@property
def inputs_to_logits_ratio(self ) -> int:
return math.prod(self.conv_stride )
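# With the default strides (5, 2, 2, 2, 2, 2, 2) this ratio is 5 * 2**6 = 320,
# i.e. the feature encoder emits one frame per 320 input waveform samples.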
| 350 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str ):
with open(path, '''rb''' ) as f:
im = Image.open(f )
return im.convert('''RGB''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
dataset_name: Optional[str] = field(
default=None , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
dataset_config_name: Optional[str] = field(
default=None , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
train_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the training data.'} )
validation_dir: Optional[str] = field(default=None , metadata={'help': 'A folder containing the validation data.'} )
train_val_split: Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
max_train_samples: Optional[int] = field(
default=None , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
max_eval_samples: Optional[int] = field(
default=None , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __post_init__(self ):
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
'''You must specify either a dataset name from the hub or a train and/or validation directory.''' )
@dataclass
class __UpperCAmelCase :
'''simple docstring'''
model_name_or_path: str = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
model_type: Optional[str] = field(
default=None , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES )} , )
config_name: Optional[str] = field(
default=None , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
cache_dir: Optional[str] = field(
default=None , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
model_revision: str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
image_processor_name: str = field(default=None , metadata={'help': 'Name or path of preprocessor config.'} )
use_auth_token: bool = field(
default=False , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
ignore_mismatched_sizes: bool = field(
default=False , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def collate_fn(examples ):
pixel_values = torch.stack([example['''pixel_values'''] for example in examples] )
labels = torch.tensor([example['''labels'''] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_image_classification''', model_args, data_args )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level )
transformers.utils.logging.set_verbosity(log_level )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
dataset = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, task='''image-classification''', use_auth_token=True if model_args.use_auth_token else None, )
else:
data_files = {}
if data_args.train_dir is not None:
data_files['''train'''] = os.path.join(data_args.train_dir, '''**''' )
if data_args.validation_dir is not None:
data_files['''validation'''] = os.path.join(data_args.validation_dir, '''**''' )
dataset = load_dataset(
'''imagefolder''', data_files=data_files, cache_dir=model_args.cache_dir, task='''image-classification''', )
# If we don't have a validation split, split off a percentage of train as validation.
data_args.train_val_split = None if '''validation''' in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split, float ) and data_args.train_val_split > 0.0:
split = dataset['''train'''].train_test_split(data_args.train_val_split )
dataset['''train'''] = split['''train''']
dataset['''validation'''] = split['''test''']
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
labels = dataset['''train'''].features['''labels'''].names
label2id, id2label = {}, {}
for i, label in enumerate(labels ):
label2id[label] = str(i )
id2label[str(i )] = label
# Load the accuracy metric from the datasets package
metric = evaluate.load('''accuracy''' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p ):
return metric.compute(predictions=np.argmax(p.predictions, axis=1 ), references=p.label_ids )
config = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path, num_labels=len(labels ), label2id=label2id, id2label=id2label, finetuning_task='''image-classification''', cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
model = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path, from_tf=bool('''.ckpt''' in model_args.model_name_or_path ), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
image_processor = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
A_ = image_processor.size['''shortest_edge''']
else:
A_ = (image_processor.size['''height'''], image_processor.size['''width'''])
A_ = Normalize(mean=image_processor.image_mean, std=image_processor.image_std )
A_ = Compose(
[
RandomResizedCrop(_UpperCamelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
A_ = Compose(
[
Resize(_UpperCamelCase ),
CenterCrop(_UpperCamelCase ),
ToTensor(),
normalize,
] )
def train_transforms(example_batch ):
example_batch['''pixel_values'''] = [
_train_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']
]
return example_batch
def val_transforms(example_batch ):
example_batch['''pixel_values'''] = [_val_transforms(pil_img.convert('''RGB''' ) ) for pil_img in example_batch['''image''']]
return example_batch
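# set_transform() registers these functions to run on the fly, so images are
# only decoded and augmented when a batch is actually materialized.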
if training_args.do_train:
if "train" not in dataset:
raise ValueError('''--do_train requires a train dataset''' )
if data_args.max_train_samples is not None:
dataset['''train'''] = (
dataset['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(_UpperCamelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError('''--do_eval requires a validation dataset''' )
if data_args.max_eval_samples is not None:
dataset['''validation'''] = (
dataset['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(_UpperCamelCase )
# Initialize our trainer
trainer = Trainer(
model=model, args=training_args, train_dataset=dataset['''train'''] if training_args.do_train else None, eval_dataset=dataset['''validation'''] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=collate_fn, )
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint )
trainer.save_model()
trainer.log_metrics('''train''', train_result.metrics )
trainer.save_metrics('''train''', train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
metrics = trainer.evaluate()
trainer.log_metrics('''eval''', metrics )
trainer.save_metrics('''eval''', metrics )
# Write model card and (optionally) push to hub
kwargs = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''image-classification''',
'''dataset''': data_args.dataset_name,
'''tags''': ['''image-classification''', '''vision'''],
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs )
else:
trainer.create_model_card(**kwargs )
if __name__ == "__main__":
main()
| 18 | 0 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module ):
'''simple docstring'''
def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ):
"""simple docstring"""
super().__init__()
self.transformers = nn.ModuleList(
[
Transformer2DModel(
num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
self.mix_ratio = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
self.condition_lengths = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
self.transformer_index_for_condition = [1, 0]
def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict: bool = True , ):
"""simple docstring"""
input_states = hidden_states
encoded_states = []
tokens_start = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
transformer_index = self.transformer_index_for_condition[i]
encoded_state = self.transformers[transformer_index](
input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
output_states = output_states + input_states
if not return_dict:
return (output_states,)
return Transformer2DModelOutput(sample=output_states )
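# Used by dual-guided pipelines (e.g. VersatileDiffusion): one transformer
# attends to 77 text tokens, the other to 257 image tokens, and their residuals
# are blended according to mix_ratio.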
| 121 |
'''simple docstring'''
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ['small', 'medium', 'large']
OLD_KEY = 'lm_head.decoder.weight'
NEW_KEY = 'lm_head.weight'
def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str ):
"""simple docstring"""
d = torch.load(checkpoint_path )
d[NEW_KEY] = d.pop(OLD_KEY )
os.makedirs(pytorch_dump_folder_path, exist_ok=True )
torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME ) )
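# The original DialoGPT checkpoints store the LM head under
# "lm_head.decoder.weight", while transformers' GPT-2 expects "lm_head.weight",
# so the conversion is just a key rename before re-saving the state dict.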
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dialogpt_path', default='.', type=str)
args = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
checkpoint_path = os.path.join(args.dialogpt_path, F"""{MODEL}_ft.pkl""")
pytorch_dump_folder_path = F"""./DialoGPT-{MODEL}"""
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 134 | 0 |
import math
def check_partition_perfect(positive_integer: int ) -> bool:
exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(exponent )
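# (sqrt(4k + 1) + 1) / 2 is an integer m + 1 exactly when k = m * (m + 1);
# the check above additionally requires that value to be a power of two
# (an integer base-2 logarithm).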
def solution(max_proportion: float = 1 / 12345 ) -> int:
total_partitions = 0
perfect_partitions = 0
integer = 3
while True:
partition_candidate = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(partition_candidate ):
partition_candidate = int(partition_candidate )
total_partitions += 1
if check_partition_perfect(partition_candidate ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(partition_candidate )
integer += 1
if __name__ == "__main__":
print(F'{solution() = }')
| 10 |
from __future__ import annotations
def peak(lst: list[int] ) -> int:
m = len(lst ) // 2
# choose the middle 3 elements
three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
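# Example: peak([1, 3, 4, 5, 4, 3, 1]) returns 5 -- the middle window [4, 5, 4]
# already contains the peak, so no recursive call is needed.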
if __name__ == "__main__":
import doctest
doctest.testmod()
| 10 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
BarthezTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'moussaKam/mbarthez': 1024,
'moussaKam/barthez': 1024,
'moussaKam/barthez-orangesum-title': 1024,
}
SPIECE_UNDERLINE = '▁'
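# The fast tokenizer runs entirely off the pre-built tokenizer.json; the
# sentencepiece model file is only needed (and only saveable) when the original
# vocab file was provided, tracked via can_save_slow_tokenizer below.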
class BarthezTokenizerFast(PreTrainedTokenizerFast ):
'''simple docstring'''
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ['input_ids', 'attention_mask']
slow_tokenizer_class = BarthezTokenizer
def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs , ):
"""simple docstring"""
mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
super().__init__(
vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
self.vocab_file = vocab_file
self.can_save_slow_tokenizer = False if not self.vocab_file else True
def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep ) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(save_directory ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
out_vocab_file = os.path.join(
save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
copyfile(self.vocab_file , out_vocab_file )
return (out_vocab_file,)
| 76 |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
lowercase_ = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder ):
def adaptive_forward( self , hidden_states , current_layer , attention_mask=None , head_mask=None ):
'''simple docstring'''
layer_outputs = self.layer[current_layer](hidden_states , attention_mask , head_mask[current_layer] )
hidden_states = layer_outputs[0]
return hidden_states
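# PABEE ("BERT Loses Patience") runs the encoder one layer at a time so a
# classifier can be attached to every layer; inference stops early once
# `patience` consecutive per-layer predictions agree.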
@add_start_docstrings(
"""The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.""" , SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( SCREAMING_SNAKE_CASE ):
def __init__( self , lowerCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase )
_lowercase =BertEncoderWithPabee(lowerCAmelCase )
self.init_weights()
_lowercase =0
_lowercase =0
_lowercase =0
_lowercase =0
def set_regression_threshold( self , threshold ):
'''simple docstring'''
self.regression_threshold = threshold
def set_patience( self , patience ):
'''simple docstring'''
self.patience = patience
def reset_stats( self ):
'''simple docstring'''
self.inference_instances_num = 0
self.inference_layers_num = 0
def log_stats( self ):
'''simple docstring'''
avg_inf_layers = self.inference_layers_num / self.inference_instances_num
message = (
F'''*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ='''
F''' {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***'''
)
print(message )
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def forward( self , input_ids=None , attention_mask=None , token_type_ids=None , position_ids=None , head_mask=None , inputs_embeds=None , encoder_hidden_states=None , encoder_attention_mask=None , output_dropout=None , output_layers=None , regression=False , ):
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape , device=device )
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape , dtype=torch.long , device=device )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask , input_shape , device )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape , device=device )
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask )
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask , self.config.num_hidden_layers )
embedding_output = self.embeddings(
input_ids=input_ids , position_ids=position_ids , token_type_ids=token_type_ids , inputs_embeds=inputs_embeds )
encoder_outputs = embedding_output
if self.training:
res = []
for i in range(self.config.num_hidden_layers ):
encoder_outputs = self.encoder.adaptive_forward(
encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )
pooled_output = self.pooler(encoder_outputs )
logits = output_layers[i](output_dropout(pooled_output ) )
res.append(logits )
elif self.patience == 0: # Use all layers for inference
encoder_outputs = self.encoder(
embedding_output , attention_mask=extended_attention_mask , head_mask=head_mask , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_extended_attention_mask , )
pooled_output = self.pooler(encoder_outputs[0] )
res = [output_layers[self.config.num_hidden_layers - 1](pooled_output )]
else:
patient_counter = 0
patient_result = None
calculated_layer_num = 0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
encoder_outputs = self.encoder.adaptive_forward(
encoder_outputs , current_layer=i , attention_mask=extended_attention_mask , head_mask=head_mask )
pooled_output = self.pooler(encoder_outputs )
logits = output_layers[i](pooled_output )
if regression:
labels = logits.detach()
if patient_result is not None:
patient_labels = patient_result.detach()
if (patient_result is not None) and torch.abs(patient_labels - labels ) < self.regression_threshold:
patient_counter += 1
else:
patient_counter = 0
else:
labels = logits.detach().argmax(dim=1 )
if patient_result is not None:
patient_labels = patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(patient_labels ) ):
patient_counter += 1
else:
patient_counter = 0
patient_result = logits
if patient_counter == self.patience:
break
res = [patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)]
        )

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        logits = self.bert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_dropout=self.dropout,
            output_layers=self.classifiers,
            regression=self.num_labels == 1,
        )

        outputs = (logits[-1],)

        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs

        return outputs
| 205 | 0 |
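To make the early-exit control flow above easier to follow, here is a minimal, self-contained sketch of the PABEE patience rule (my own illustration, not part of the HF example): inference stops at the first layer where `patience` consecutive per-layer classifiers agree.

import torch

def pabee_exit_layer(per_layer_logits, patience):
    # Returns the 1-based layer index at which PABEE-style inference would stop.
    patient_counter, prev_pred = 0, None
    for layer, logits in enumerate(per_layer_logits, start=1):
        pred = logits.argmax(dim=-1)
        if prev_pred is not None and torch.equal(pred, prev_pred):
            patient_counter += 1
        else:
            patient_counter = 0
        prev_pred = pred
        if patient_counter == patience:
            return layer
    return len(per_layer_logits)

# With patience=2, three agreeing layers in a row trigger an exit at layer 3:
logits = [torch.tensor([[0.9, 0.1]]), torch.tensor([[0.8, 0.2]]), torch.tensor([[0.7, 0.3]])]
assert pabee_exit_layer(logits, patience=2) == 3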
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1
    )
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 362 |
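A quick usage check for the solver as restored above; the system below is strictly diagonally dominant, so the dominance check passes.

import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
# three Jacobi sweeps from the given initial guess
print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=3))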
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range, use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48,
            eos_token_id=2, pad_token_id=1, bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2,
            decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
| 210 | 0 |
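To clarify the `shift_tokens_right` contract that `test_shift_tokens_right` exercises, here is a minimal numpy re-implementation (an illustrative sketch, not the library function): the decoder input gets the start token prepended and drops the last position.

import numpy as np

def shift_right(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # label-masking positions (-100) become padding
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_right(ids, pad_token_id=1, decoder_start_token_id=2))
# [[ 2 71 82 18 33  2  1]] -- one pad token fewer, start token prepended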
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "bert_for_seq_generation": (
            "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512}
class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>",
        sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 201 |
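A short note on the `__getstate__`/`__setstate__` pair above: the underlying C++ `SentencePieceProcessor` is not picklable, so the state drops it and reloads it from `vocab_file` on unpickling. A sketch (it needs a real `spiece.model` on disk, hence commented out):

import pickle

# tok = BertGenerationTokenizer(vocab_file="spiece.model")  # requires a real SentencePiece model file
# tok2 = pickle.loads(pickle.dumps(tok))                    # works: __setstate__ reloads spiece.model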
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 7 | 0 |
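A small illustration of the generator-seeding pattern used in `get_dummy_inputs` above (my summary of the convention, assuming current PyTorch semantics): a device-bound `torch.Generator` gives reproducible draws, while MPS historically required seeding the global generator, hence the branch.

import torch

def seeded_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # global-generator fallback for MPS
    return torch.Generator(device=device).manual_seed(seed)

g1 = seeded_generator("cpu", seed=0)
g2 = seeded_generator("cpu", seed=0)
assert torch.equal(torch.randn(2, generator=g1), torch.randn(2, generator=g2))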
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=32, num_channels=3, num_stages=4,
        hidden_sizes=[10, 20, 30, 40], depths=[2, 2, 3, 2], is_training=True, use_labels=True,
        intermediate_size=37, hidden_act="gelu", type_sequence_label_size=10, initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"], num_labels=3, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes,
            depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256,
            auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels,
        )
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of ADE20k
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 244 |
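A small sanity check of the shape assertion in the hidden-states test above, assuming ConvNeXt's usual downsampling (stem reduces by 4, then each later stage halves the resolution):

image_size = 32
stage_resolutions = [image_size // (4 * 2**i) for i in range(4)]
print(stage_resolutions)  # [8, 4, 2, 1] -- the first stage of a 32x32 input is 8x8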
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
| 244 | 1 |
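A worked check of the deque-rotation construction above: each matrix row is the second signal rotated right by the row index, and the matrix-vector product reproduces the standard circular convolution.

from collections import deque
import numpy as np

second = [1, 2, 3, 4]
matrix = []
for i in range(4):
    row = deque(second)
    row.rotate(i)  # right rotation by i positions
    matrix.append(list(row))
result = np.matmul(np.transpose(matrix), [2, 1, 2, -1])
print([round(x, 2) for x in result])  # [10, 10, 6, 14]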
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behaves like a normal word: include the preceding space when matching raw text.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 16 |
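A worked example of the segment-id layout produced by `create_token_type_ids_from_sequences` above (a pure-Python mirror for illustration): segment 0 covers [CLS] A [SEP], segment 1 covers B [SEP].

def token_type_ids_for_pair(len_a: int, len_b: int) -> list[int]:
    # mirrors the method above: [CLS] + A + [SEP] -> 0s, B + [SEP] -> 1s
    return [0] * (len_a + 2) + [1] * (len_b + 1)

print(token_type_ids_for_pair(2, 1))  # [0, 0, 0, 0, 1, 1]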
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--deit_name",
default="vit_deit_base_distilled_patch16_224",
type=str,
help="Name of the DeiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_A = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
| 231 | 0 |
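A shape-level sketch of the q/k/v split performed in `read_in_q_k_v` above: timm stores one fused (3*hidden, hidden) projection, which is sliced into equal thirds for query, key and value.

import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)  # timm-style fused qkv weight
query = in_proj_weight[:hidden, :]
key = in_proj_weight[hidden : 2 * hidden, :]
value = in_proj_weight[-hidden:, :]
assert query.shape == key.shape == value.shape == (hidden, hidden)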
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 335 |
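A hedged usage sketch for the pipeline above; the checkpoint id is hypothetical, and any 3-channel `UNet2DModel` trained for Karras VE sampling would do (commented out since it needs real weights):

import torch

# unet = UNet2DModel.from_pretrained("some-org/karras-ve-unet")  # hypothetical checkpoint id
# scheduler = KarrasVeScheduler()
# pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
# image = pipe(batch_size=1, num_inference_steps=50, generator=torch.manual_seed(0)).images[0]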
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 335 | 1 |
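A minimal usage sketch of the progress-bar helpers re-exported above (assuming this file is `datasets/utils/__init__.py`, so the names resolve from `datasets.utils`):

from datasets.utils import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled

disable_progress_bar()
assert not is_progress_bar_enabled()
enable_progress_bar()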
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

DUMMY_CONSTANT = "\n{0} = None\n"

DUMMY_CLASS = "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, {1})\n"

DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence from a backend name to the module name used by `requires_backends`
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
f"""Updating diffusers.utils.dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py as the main """
"__init__ has new objects." )
with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n" ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
"The main __init__ has objects that are not present in "
f"""diffusers.utils.dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py. Run `make fix-copies` """
"to fix this." )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 61 |
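For reference, here is what the DUMMY_CLASS template above expands to for a hypothetical object named `FakePipeline` guarded by the torch backend (the class name is illustrative):

class FakePipeline(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])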
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def _lowercase ( lowercase__ ):
__lowerCAmelCase : str = []
__lowerCAmelCase : List[Any] = []
__lowerCAmelCase : str = []
for rt in rc.restypes:
__lowerCAmelCase : List[Any] = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
__lowerCAmelCase : List[str] = {name: i for i, name in enumerate(lowercase__ )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 1_4 )
restype_atomaa_to_atomaa_list.append([0] * 3_7 )
restype_atomaa_mask_list.append([0.0] * 1_4 )
__lowerCAmelCase : List[Any] = torch.tensor(
lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
__lowerCAmelCase : Optional[Any] = torch.tensor(
lowercase__ , dtype=torch.intaa , device=protein['''aatype'''].device , )
__lowerCAmelCase : Tuple = torch.tensor(
lowercase__ , dtype=torch.floataa , device=protein['''aatype'''].device , )
__lowerCAmelCase : List[Any] = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
__lowerCAmelCase : Any = restype_atomaa_to_atomaa[protein_aatype]
__lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype]
__lowerCAmelCase : int = residx_atomaa_mask
__lowerCAmelCase : List[str] = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
__lowerCAmelCase : int = restype_atomaa_to_atomaa[protein_aatype]
__lowerCAmelCase : Union[str, Any] = residx_atomaa_to_atomaa.long()
# create the corresponding mask
__lowerCAmelCase : str = torch.zeros([2_1, 3_7] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
__lowerCAmelCase : Optional[int] = rc.restype_atoa[restype_letter]
__lowerCAmelCase : Optional[Any] = rc.residue_atoms[restype_name]
for atom_name in atom_names:
__lowerCAmelCase : str = rc.atom_order[atom_name]
__lowerCAmelCase : List[Any] = 1
__lowerCAmelCase : Union[str, Any] = restype_atomaa_mask[protein_aatype]
__lowerCAmelCase : Any = residx_atomaa_mask
return protein
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Dict = tree_map(lambda lowercase__ : torch.tensor(lowercase__ , device=batch['''aatype'''].device ) , lowercase__ , np.ndarray )
__lowerCAmelCase : Tuple = tensor_tree_map(lambda lowercase__ : np.array(lowercase__ ) , make_atomaa_masks(lowercase__ ) )
return out
| 275 | 0 |
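A hedged standalone sketch of how the atom14-to-atom37 gather indices built above are typically consumed, padding per-residue atom14 coordinates out to the atom37 layout (tensor and function names here are illustrative):

import torch

def atom14_to_atom37(atom14_positions, residx_atom37_to_atom14, atom37_atom_exists):
    # atom14_positions: [num_res, 14, 3]; indices and mask: [num_res, 37]
    index = residx_atom37_to_atom14[..., None].expand(-1, -1, 3)
    atom37_positions = torch.gather(atom14_positions, dim=1, index=index)
    # zero out atom37 slots that do not exist for the residue type
    return atom37_positions * atom37_atom_exists[..., None]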
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
A_ : int = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Tuple = self.dummy_uncond_unet
A_ : Union[str, Any] = PNDMScheduler()
A_ : Dict = PNDMPipeline(unet=lowercase , scheduler=lowercase )
pndm.to(lowercase )
pndm.set_progress_bar_config(disable=lowercase )
A_ : List[str] = torch.manual_seed(0 )
A_ : List[str] = pndm(generator=lowercase , num_inference_steps=2_0 , output_type='numpy' ).images
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : List[str] = pndm(generator=lowercase , num_inference_steps=2_0 , output_type='numpy' , return_dict=lowercase )[0]
A_ : str = image[0, -3:, -3:, -1]
A_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
A_ : int = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : int = 'google/ddpm-cifar10-32'
A_ : List[str] = UNetaDModel.from_pretrained(lowercase )
A_ : List[Any] = PNDMScheduler()
A_ : int = PNDMPipeline(unet=lowercase , scheduler=lowercase )
pndm.to(lowercase )
pndm.set_progress_bar_config(disable=lowercase )
A_ : List[str] = torch.manual_seed(0 )
A_ : Tuple = pndm(generator=lowercase , output_type='numpy' ).images
A_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
A_ : Optional[int] = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 363 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
_UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCAmelCase = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = 42
class UpperCAmelCase ( __A ):
'''simple docstring'''
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , ):
"""simple docstring"""
super().__init__()
self.register_modules(
prior=lowercase , image_encoder=lowercase , image_processor=lowercase , scheduler=lowercase , renderer=lowercase , )
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
if latents is None:
A_ : Optional[Any] = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
A_ : Optional[int] = latents.to(lowercase )
A_ : List[Any] = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase_ ( self , lowercase=0 ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
A_ : Tuple = torch.device(F'''cuda:{gpu_id}''' )
A_ : Dict = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowercase , lowercase )
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(lowercase , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowerCAmelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ):
"""simple docstring"""
if isinstance(lowercase , lowercase ) and isinstance(image[0] , torch.Tensor ):
A_ : Tuple = torch.cat(lowercase , axis=0 ) if image[0].ndim == 4 else torch.stack(lowercase , axis=0 )
if not isinstance(lowercase , torch.Tensor ):
A_ : Dict = self.image_processor(lowercase , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
A_ : List[str] = image.to(dtype=self.image_encoder.dtype , device=lowercase )
A_ : Tuple = self.image_encoder(lowercase )['last_hidden_state']
A_ : Dict = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
A_ : List[str] = image_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
A_ : str = torch.zeros_like(lowercase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A_ : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(lowercase )
def __call__( self , lowercase , lowercase = 1 , lowercase = 2_5 , lowercase = None , lowercase = None , lowercase = 4.0 , lowercase = 6_4 , lowercase = "pil" , lowercase = True , ):
"""simple docstring"""
if isinstance(lowercase , PIL.Image.Image ):
A_ : int = 1
elif isinstance(lowercase , torch.Tensor ):
A_ : int = image.shape[0]
elif isinstance(lowercase , lowercase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
A_ : List[str] = len(lowercase )
else:
raise ValueError(
F'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(lowercase )}''' )
A_ : Any = self._execution_device
A_ : List[Any] = batch_size * num_images_per_prompt
A_ : int = guidance_scale > 1.0
A_ : Optional[int] = self._encode_image(lowercase , lowercase , lowercase , lowercase )
# prior
self.scheduler.set_timesteps(lowercase , device=lowercase )
A_ : Dict = self.scheduler.timesteps
A_ : int = self.prior.config.num_embeddings
A_ : int = self.prior.config.embedding_dim
A_ : Dict = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , lowercase , lowercase , lowercase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
A_ : Union[str, Any] = latents.reshape(latents.shape[0] , lowercase , lowercase )
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the latents if we are doing classifier free guidance
A_ : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A_ : List[Any] = self.scheduler.scale_model_input(lowercase , lowercase )
A_ : Any = self.prior(
lowercase , timestep=lowercase , proj_embedding=lowercase , ).predicted_image_embedding
# remove the variance
A_ , A_ : int = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
A_ , A_ : List[Any] = noise_pred.chunk(2 )
A_ : str = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
A_ : Optional[int] = self.scheduler.step(
lowercase , timestep=lowercase , sample=lowercase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=lowercase )
A_ : str = []
for i, latent in enumerate(lowercase ):
A_ : Optional[Any] = self.renderer.decode(
latent[None, :] , lowercase , size=lowercase , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , )
images.append(lowercase )
A_ : Dict = torch.stack(lowercase )
if output_type not in ["np", "pil"]:
raise ValueError(F'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
A_ : Dict = images.cpu().numpy()
if output_type == "pil":
A_ : str = [self.numpy_to_pil(lowercase ) for image in images]
# Offload last model to CPU
if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=lowercase )
| 192 | 0 |
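A standalone numeric sketch of the classifier-free-guidance update used in the denoising loop above, `noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)`; all tensors are illustrative:

import torch

noise_pred_uncond = torch.zeros(1, 4)  # prediction with the zeroed image embedding
noise_pred_cond = torch.ones(1, 4)     # prediction with the real image embedding
guidance_scale = 3.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
# guidance_scale == 1.0 recovers the conditional prediction; larger values
# extrapolate further away from the unconditional one.
print(guided)  # tensor([[3., 3., 3., 3.]])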
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # silence SQLAlchemy 2.0 deprecation warnings in tests
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 146 |
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
| 229 | 0 |
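A hedged usage sketch of the builder above through the public datasets API; the builder reads pickled DataFrames, so the input file is created with `to_pickle` (file name is illustrative):

import pandas as pd
from datasets import load_dataset

pd.DataFrame({"text": ["hello", "world"], "label": [0, 1]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})["train"]
print(ds[0])  # {'text': 'hello', 'label': 0}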
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , ):
'''simple docstring'''
A : Optional[int] = {}
if train_file is not None:
A : int = [train_file]
if eval_file is not None:
A : List[str] = [eval_file]
if test_file is not None:
A : Optional[Any] = [test_file]
A : Union[str, Any] = datasets.load_dataset('''csv''' , data_files=snake_case__ )
A : int = list(ds[list(files.keys() )[0]].features.keys() )
A : Optional[int] = features_name.pop(snake_case__ )
A : List[Any] = list(set(ds[list(files.keys() )[0]][label_name] ) )
A : Dict = {label: i for i, label in enumerate(snake_case__ )}
A : List[str] = tokenizer.model_input_names
A : List[Any] = {}
if len(snake_case__ ) == 1:
for k in files.keys():
A : Any = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=snake_case__ , max_length=snake_case__ , padding='''max_length''' ) , batched=snake_case__ , )
elif len(snake_case__ ) == 2:
for k in files.keys():
A : Tuple = ds[k].map(
lambda snake_case__ : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=snake_case__ , max_length=snake_case__ , padding='''max_length''' , ) , batched=snake_case__ , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
A : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
A : Dict = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
A : List[str] = {k: v for k, v in ex.items() if k in input_names}
A : Optional[Any] = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
A : Union[str, Any] = {k: v for k, v in ex.items() if k in input_names}
A : Tuple = labelaid[ex[label_name]]
yield (d, label)
A : int = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
A : int = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
A : Union[str, Any] = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
A : str = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
A : Union[str, Any] = (
tf.data.Dataset.from_generator(
snake_case__ , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
A : Optional[Any] = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowercase : List[str] = logging.getLogger(__name__)
@dataclass
class A :
__magic_name__ = field(metadata={'''help''': '''Which column contains the label'''} )
__magic_name__ = field(default=__snake_case , metadata={'''help''': '''The path of the training file'''} )
__magic_name__ = field(default=__snake_case , metadata={'''help''': '''The path of the development file'''} )
__magic_name__ = field(default=__snake_case , metadata={'''help''': '''The path of the test file'''} )
__magic_name__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
__magic_name__ = field(
default=__snake_case , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class A :
__magic_name__ = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
__magic_name__ = field(
default=__snake_case , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
__magic_name__ = field(
default=__snake_case , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
__magic_name__ = field(default=__snake_case , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__magic_name__ = field(
default=__snake_case , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def lowerCAmelCase_ ( ):
'''simple docstring'''
A : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
A, A, A : Any = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A : Optional[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
A, A, A, A : List[Any] = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=snake_case__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
A : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(snake_case__ ) , labelaid=snake_case__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
A : Union[str, Any] = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , )
def compute_metrics(snake_case__ ) -> Dict:
A : Optional[int] = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
A : Optional[int] = TFTrainer(
model=snake_case__ , args=snake_case__ , train_dataset=snake_case__ , eval_dataset=snake_case__ , compute_metrics=snake_case__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A : Any = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A : int = trainer.evaluate()
A : str = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(snake_case__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(snake_case__ )
return results
if __name__ == "__main__":
main()
| 311 |
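A hedged sketch of the CSV layout the `get_tfds` helper above expects (column names are illustrative): each file carries the feature columns plus one label column, selected by index through `--label_column_id`:

import pandas as pd

pd.DataFrame(
    {"sentence": ["great movie", "terrible plot"], "label": ["pos", "neg"]}
).to_csv("train.csv", index=False)
# With label_column_id=1, "label" becomes the target and the remaining
# "sentence" column is what gets tokenized.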
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase : Optional[int] = logging.get_logger(__name__)
lowercase : Tuple = {
'google/pix2struct-textcaps-base': (
'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
),
}
class A ( __snake_case ):
__magic_name__ = '''pix2struct_text_model'''
__magic_name__ = ['''past_key_values''']
__magic_name__ = {
'''hidden_size''': '''hidden_size''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , SCREAMING_SNAKE_CASE=50244 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE=1 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[Any]:
"""simple docstring"""
A : str = vocab_size
A : List[str] = hidden_size
A : List[Any] = d_kv
A : Optional[Any] = d_ff
A : Dict = num_layers
A : Dict = num_heads
A : Optional[int] = relative_attention_num_buckets
A : Optional[Any] = relative_attention_max_distance
A : Dict = dropout_rate
A : Dict = layer_norm_epsilon
A : Tuple = initializer_factor
A : Union[str, Any] = use_cache
A : int = eos_token_id
A : List[str] = decoder_start_token_id
# for backwards compatibility
A : int = dense_act_fn
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE , eos_token_id=SCREAMING_SNAKE_CASE , decoder_start_token_id=SCREAMING_SNAKE_CASE , tie_word_embeddings=SCREAMING_SNAKE_CASE , is_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : Optional[Any] = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Union[str, Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct_vision_model'''
def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=2048 , SCREAMING_SNAKE_CASE=64 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE="gelu_new" , SCREAMING_SNAKE_CASE=1e-6 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=4096 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
A : List[str] = hidden_size
A : Optional[Any] = patch_embed_hidden_size
A : Union[str, Any] = d_ff
A : Dict = dropout_rate
A : str = num_hidden_layers
A : Dict = num_attention_heads
A : Tuple = initializer_range
A : List[str] = initializer_factor
A : Union[str, Any] = attention_dropout
A : Tuple = layer_norm_eps
A : int = dense_act_fn
A : Optional[int] = seq_len
A : Tuple = relative_attention_num_buckets
A : str = relative_attention_max_distance
A : Optional[Any] = d_kv
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
A, A : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get('''model_type''' ) == "pix2struct":
A : Optional[Any] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class A ( __snake_case ):
__magic_name__ = '''pix2struct'''
__magic_name__ = True
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1.0 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Any:
"""simple docstring"""
super().__init__(tie_word_embeddings=SCREAMING_SNAKE_CASE , is_encoder_decoder=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if text_config is None:
A : Dict = {}
logger.info('''text_config is None. Initializing the Pix2StructTextConfig with default values.''' )
if vision_config is None:
A : str = {}
logger.info('''vision_config is None. Initializing the Pix2StructVisionConfig with default values.''' )
A : Dict = PixaStructTextConfig(**SCREAMING_SNAKE_CASE )
A : Any = PixaStructVisionConfig(**SCREAMING_SNAKE_CASE )
A : Any = self.text_config.decoder_start_token_id
A : Any = self.text_config.pad_token_id
A : Dict = self.text_config.eos_token_id
A : Union[str, Any] = initializer_factor
A : Tuple = initializer_range
A : Optional[Any] = self.initializer_range
A : int = self.initializer_range
A : Tuple = is_vqa
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Tuple = copy.deepcopy(self.__dict__ )
A : Dict = self.text_config.to_dict()
A : int = self.vision_config.to_dict()
A : Any = self.__class__.model_type
return output
| 311 | 1 |
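A hedged usage sketch of the composite configuration defined above, written against the upstream transformers names (the class names in this snippet are mangled):

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)
print(config.text_config.num_layers)  # 2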
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self: Optional[int] ) -> int:
snake_case_ :Optional[int] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
snake_case_ :List[Any] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
snake_case_ :Dict = """The dog is cute and lives in the garden house"""
snake_case_ :Dict = jnp.array([tokenizer.encode(snake_case )] )
snake_case_ :Optional[Any] = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
snake_case_ :Optional[Any] = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
snake_case_ :List[Any] = model(snake_case )["""last_hidden_state"""]
self.assertEqual(output.shape , snake_case )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , snake_case , atol=1E-3 ) )
| 66 |
def binary_recursive(decimal: int) -> str:
    """Recursively build the binary digit string of a non-negative integer."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert an integer string (optionally negative) to a '0b'-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
if __name__ == "__main__":
from doctest import testmod
testmod()
| 18 | 0 |
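Quick sanity checks for the converter above (expected output in comments):

print(main("11"))    # 0b1011
print(main("-22"))   # -0b10110
print(main("0"))     # 0b0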
from __future__ import annotations
import time
import numpy as np
lowercase__ : Optional[Any] = [8, 5, 9, 7]
lowercase__ : Optional[int] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowercase__ : Any = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[int]] , lowerCAmelCase__ : list[list[int]] , ) -> int:
'''simple docstring'''
_UpperCamelCase = claim_vector
_UpperCamelCase = allocated_resources_table
_UpperCamelCase = maximum_claim_table
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def snake_case__ ( self : List[Any] ) -> Any:
'''simple docstring'''
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def snake_case__ ( self : int ) -> List[Any]:
'''simple docstring'''
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(__SCREAMING_SNAKE_CASE ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def snake_case__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
return {self.__need().index(__SCREAMING_SNAKE_CASE ): i for i in self.__need()}
def snake_case__ ( self : Union[str, Any] , **lowerCAmelCase__ : Any ) -> str:
'''simple docstring'''
_UpperCamelCase = self.__need()
_UpperCamelCase = self.__allocated_resources_table
_UpperCamelCase = self.__available_resources()
_UpperCamelCase = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
_UpperCamelCase = False
for each_need in need_list:
_UpperCamelCase = True
for index, need in enumerate(__SCREAMING_SNAKE_CASE ):
if need > available_resources[index]:
_UpperCamelCase = False
break
if execution:
_UpperCamelCase = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_UpperCamelCase = original_need_index
print(f"""Process {process_number + 1} is executing.""" )
# remove the process run from stack
need_list.remove(__SCREAMING_SNAKE_CASE )
# update available/freed resources stack
_UpperCamelCase = np.array(__SCREAMING_SNAKE_CASE ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(__SCREAMING_SNAKE_CASE ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
f"""P{self.__allocated_resources_table.index(__SCREAMING_SNAKE_CASE ) + 1}"""
+ ''' '''.join(f"""{it:>8}""" for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
f"""P{self.__maximum_claim_table.index(__SCREAMING_SNAKE_CASE ) + 1}"""
+ ''' '''.join(f"""{it:>8}""" for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(__SCREAMING_SNAKE_CASE ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(__SCREAMING_SNAKE_CASE ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363 |
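A compact standalone sketch of the Banker's-algorithm safety check that the class above implements, reusing the example tables from the top of the snippet (function and variable names here are illustrative):

import numpy as np

claim_vector = [8, 5, 9, 7]
allocated = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
maximum = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]

def is_safe_state(claim, alloc, max_claim):
    need = np.array(max_claim) - np.array(alloc)
    available = np.array(claim) - np.array(alloc).sum(axis=0)
    pending = list(range(len(alloc)))
    while pending:
        runnable = [p for p in pending if (need[p] <= available).all()]
        if not runnable:
            return False  # no process can run to completion: unsafe state
        p = runnable[0]
        available = available + np.array(alloc)[p]  # process p finishes, frees resources
        pending.remove(p)
    return True

print(is_safe_state(claim_vector, allocated, maximum))  # True for these tables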
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ : Optional[Any] = get_tests_dir('fixtures/test_sentencepiece.model')
lowercase__ : Any = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
lowercase__ : Tuple = 'pt' if is_torch_available() else 'tf'
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Union[str, Any] = CamembertTokenizer
_snake_case : str = CamembertTokenizerFast
_snake_case : int = True
_snake_case : List[str] = True
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__ ) , lowerCAmelCase__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__ ) , lowerCAmelCase__ )
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>NOTUSED''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(lowerCAmelCase__ ) , 1004 )
def snake_case__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = CamembertTokenizer(lowerCAmelCase__ )
tokenizer.save_pretrained(self.tmpdirname )
_UpperCamelCase = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
_UpperCamelCase = tokenizer.convert_ids_to_tokens(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.tokenize(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(lowerCAmelCase__ )
_UpperCamelCase = rust_tokenizer.encode(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
@slow
def snake_case__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
_UpperCamelCase = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name='''camembert-base''' , revision='''3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf''' , sequences=lowerCAmelCase__ , )
| 287 | 0 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    perfect_partitions = 0
    total_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f'{solution() = }')
| 10 |
from math import ceil, sqrt
def solution(limit: int = 1000000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1
        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
    return answer
if __name__ == "__main__":
print(f'{solution() = }')
| 10 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A = logging.get_logger(__name__)
def lowercase_ ( _lowerCamelCase: str ) -> YolosConfig:
'''simple docstring'''
__lowerCamelCase : Dict = YolosConfig()
# size of the architecture
if "yolos_ti" in yolos_name:
__lowerCamelCase : str = 192
__lowerCamelCase : int = 768
__lowerCamelCase : Optional[int] = 12
__lowerCamelCase : Tuple = 3
__lowerCamelCase : Optional[int] = [800, 1333]
__lowerCamelCase : Any = False
elif yolos_name == "yolos_s_dWr":
__lowerCamelCase : Optional[Any] = 330
__lowerCamelCase : List[Any] = 14
__lowerCamelCase : Union[str, Any] = 6
__lowerCamelCase : List[Any] = 1320
elif "yolos_s" in yolos_name:
__lowerCamelCase : int = 384
__lowerCamelCase : Dict = 1536
__lowerCamelCase : Any = 12
__lowerCamelCase : Union[str, Any] = 6
elif "yolos_b" in yolos_name:
__lowerCamelCase : str = [800, 1344]
__lowerCamelCase : List[str] = 91
__lowerCamelCase : Any = "huggingface/label-files"
__lowerCamelCase : Optional[int] = "coco-detection-id2label.json"
__lowerCamelCase : Tuple = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="dataset" ) , "r" ) )
__lowerCamelCase : Dict = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
__lowerCamelCase : Any = idalabel
__lowerCamelCase : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowercase_ ( _lowerCamelCase: dict , _lowerCamelCase: YolosConfig , _lowerCamelCase: bool = False ) -> int:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCamelCase : List[str] = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCamelCase : Union[str, Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCamelCase : List[str] = in_proj_weight[: config.hidden_size, :]
__lowerCamelCase : str = in_proj_bias[: config.hidden_size]
__lowerCamelCase : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCamelCase : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCamelCase : Optional[int] = in_proj_weight[-config.hidden_size :, :]
__lowerCamelCase : int = in_proj_bias[-config.hidden_size :]
def lowercase_ ( _lowerCamelCase: str ) -> str:
'''simple docstring'''
if "backbone" in name:
__lowerCamelCase : List[Any] = name.replace("backbone" , "vit" )
if "cls_token" in name:
__lowerCamelCase : Tuple = name.replace("cls_token" , "embeddings.cls_token" )
if "det_token" in name:
__lowerCamelCase : List[str] = name.replace("det_token" , "embeddings.detection_tokens" )
if "mid_pos_embed" in name:
__lowerCamelCase : Union[str, Any] = name.replace("mid_pos_embed" , "encoder.mid_position_embeddings" )
if "pos_embed" in name:
__lowerCamelCase : str = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
__lowerCamelCase : Any = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "blocks" in name:
__lowerCamelCase : Dict = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
__lowerCamelCase : Tuple = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
__lowerCamelCase : Dict = name.replace("attn" , "attention.self" )
if "norm1" in name:
__lowerCamelCase : List[str] = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
__lowerCamelCase : Any = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
__lowerCamelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
__lowerCamelCase : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
if "class_embed" in name:
__lowerCamelCase : int = name.replace("class_embed" , "class_labels_classifier" )
if "bbox_embed" in name:
__lowerCamelCase : Optional[int] = name.replace("bbox_embed" , "bbox_predictor" )
if "vit.norm" in name:
__lowerCamelCase : Optional[int] = name.replace("vit.norm" , "vit.layernorm" )
return name
def lowercase_ ( _lowerCamelCase: dict , _lowerCamelCase: YolosForObjectDetection ) -> dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__lowerCamelCase : List[str] = orig_state_dict.pop(_lowerCamelCase )
if "qkv" in key:
__lowerCamelCase : Tuple = key.split("." )
__lowerCamelCase : Any = int(key_split[2] )
__lowerCamelCase : Optional[int] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
if "weight" in key:
__lowerCamelCase : Optional[int] = val[:dim, :]
__lowerCamelCase : Union[str, Any] = val[
dim : dim * 2, :
]
__lowerCamelCase : Union[str, Any] = val[-dim:, :]
else:
__lowerCamelCase : Optional[Any] = val[:dim]
__lowerCamelCase : int = val[dim : dim * 2]
__lowerCamelCase : Tuple = val[-dim:]
else:
__lowerCamelCase : Optional[int] = val
return orig_state_dict
def lowercase_ ( ) -> torch.Tensor:
'''simple docstring'''
__lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCamelCase : Optional[Any] = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def lowercase_ ( _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: str , _lowerCamelCase: bool = False ) -> Any:
'''simple docstring'''
__lowerCamelCase : Optional[int] = get_yolos_config(_lowerCamelCase )
# load original state_dict
__lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location="cpu" )["model"]
# load 🤗 model
__lowerCamelCase : List[Any] = YolosForObjectDetection(_lowerCamelCase )
model.eval()
__lowerCamelCase : Optional[int] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
# Check outputs on an image, prepared by YolosImageProcessor
__lowerCamelCase : List[str] = 800 if yolos_name != "yolos_ti" else 512
__lowerCamelCase : Optional[int] = YolosImageProcessor(format="coco_detection" , size=_lowerCamelCase )
__lowerCamelCase : Any = image_processor(images=prepare_img() , return_tensors="pt" )
__lowerCamelCase : Union[str, Any] = model(**_lowerCamelCase )
    logits, pred_boxes = outputs.logits, outputs.pred_boxes
    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"""Unknown yolos_name: {yolos_name}""")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
print(F"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowerCamelCase )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
__lowerCamelCase : List[Any] = {
"yolos_ti": "yolos-tiny",
"yolos_s_200_pre": "yolos-small",
"yolos_s_300_pre": "yolos-small-300",
"yolos_s_dWr": "yolos-small-dwr",
"yolos_base": "yolos-base",
}
print("Pushing to the hub..." )
__lowerCamelCase : Tuple = model_mapping[yolos_name]
image_processor.push_to_hub(_lowerCamelCase , organization="hustvl" )
model.push_to_hub(_lowerCamelCase , organization="hustvl" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__A = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub) | 354 | """simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _snake_case ( a__ , unittest.TestCase ):
snake_case__ = DiTPipeline
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
snake_case__ = PipelineTesterMixin.required_optional_params - {
"latents",
"num_images_per_prompt",
"callback",
"callback_steps",
}
snake_case__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
snake_case__ = False
def lowerCamelCase__ ( self : Tuple ):
torch.manual_seed(0 )
__lowerCamelCase : Optional[Any] = Transformer2DModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=UpperCAmelCase , activation_fn="gelu-approximate" , num_embeds_ada_norm=1000 , norm_type="ada_norm_zero" , norm_elementwise_affine=UpperCAmelCase , )
__lowerCamelCase : List[str] = AutoencoderKL()
__lowerCamelCase : List[Any] = DDIMScheduler()
__lowerCamelCase : Optional[Any] = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
return components
def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Union[str, Any]=0 ):
if str(UpperCAmelCase ).startswith("mps" ):
__lowerCamelCase : List[str] = torch.manual_seed(UpperCAmelCase )
else:
__lowerCamelCase : List[str] = torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
__lowerCamelCase : str = {
"class_labels": [1],
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Dict = "cpu"
__lowerCamelCase : int = self.get_dummy_components()
__lowerCamelCase : Optional[Any] = self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
__lowerCamelCase : Optional[int] = self.get_dummy_inputs(UpperCAmelCase )
__lowerCamelCase : List[Any] = pipe(**UpperCAmelCase ).images
__lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCamelCase : Optional[int] = np.array([0.2_9_4_6, 0.6_6_0_1, 0.4_3_2_9, 0.3_2_9_6, 0.4_1_4_4, 0.5_3_1_9, 0.7_2_7_3, 0.5_0_1_3, 0.4_4_5_7] )
__lowerCamelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1E-3 )
def lowerCamelCase__ ( self : Any ):
self._test_inference_batch_single_identical(relax_max_difference=UpperCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCamelCase__ ( self : List[str] ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[int] = torch.manual_seed(0 )
__lowerCamelCase : str = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256" )
pipe.to("cuda" )
__lowerCamelCase : Tuple = ["vase", "umbrella", "white shark", "white wolf"]
__lowerCamelCase : Optional[int] = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=40 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Dict = load_numpy(
F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Tuple = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512" )
__lowerCamelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("cuda" )
__lowerCamelCase : Union[str, Any] = ["vase", "umbrella"]
__lowerCamelCase : int = pipe.get_label_ids(UpperCAmelCase )
__lowerCamelCase : Dict = torch.manual_seed(0 )
__lowerCamelCase : Dict = pipe(UpperCAmelCase , generator=UpperCAmelCase , num_inference_steps=25 , output_type="np" ).images
for word, image in zip(UpperCAmelCase , UpperCAmelCase ):
__lowerCamelCase : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
F"""/dit/{word}_512.npy""" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 64 | 0 |
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase_ = '''platform'''
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def lowerCamelCase_ ( _a : Union[str, Any] , _a : Optional[int] , _a : Optional[Any]=None , _a : Any=None , _a : str=None , _a : List[str]=None , _a : Any=None , _a : Optional[int]=None , ):
'''simple docstring'''
if attention_mask is None:
UpperCAmelCase_ : Tuple = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
UpperCAmelCase_ : Tuple = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
UpperCAmelCase_ : str = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ : Optional[int] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ : str = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
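# Standalone check of the padding-mask rule used above: positions equal to the
# pad token id become 0, all others 1 (pad_token_id=1 is assumed for illustration).
import numpy as np

example_ids = np.array([[5, 7, 9, 1, 1]])
example_mask = np.where(example_ids != 1, 1, 0)
assert example_mask.tolist() == [[1, 1, 1, 0, 0]]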
class _snake_case :
'''simple docstring'''
def __init__( self: Any ,lowerCamelCase_: str ,lowerCamelCase_: List[Any]=13 ,lowerCamelCase_: Dict=7 ,lowerCamelCase_: List[Any]=True ,lowerCamelCase_: Optional[Any]=False ,lowerCamelCase_: Optional[Any]=99 ,lowerCamelCase_: Optional[int]=16 ,lowerCamelCase_: Optional[Any]=2 ,lowerCamelCase_: List[Any]=4 ,lowerCamelCase_: Dict=4 ,lowerCamelCase_: Tuple="gelu" ,lowerCamelCase_: Optional[Any]=0.1 ,lowerCamelCase_: List[str]=0.1 ,lowerCamelCase_: List[Any]=32 ,lowerCamelCase_: Tuple=2 ,lowerCamelCase_: List[Any]=1 ,lowerCamelCase_: Union[str, Any]=0 ,lowerCamelCase_: Dict=0.0_2 ,) -> List[str]:
UpperCAmelCase_ : Tuple = parent
UpperCAmelCase_ : Optional[int] = batch_size
UpperCAmelCase_ : List[str] = seq_length
UpperCAmelCase_ : int = is_training
UpperCAmelCase_ : str = use_labels
UpperCAmelCase_ : Optional[Any] = vocab_size
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : Dict = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Dict = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : Any = max_position_embeddings
UpperCAmelCase_ : List[str] = eos_token_id
UpperCAmelCase_ : Optional[int] = pad_token_id
UpperCAmelCase_ : Dict = bos_token_id
UpperCAmelCase_ : int = initializer_range
def A__ ( self: str ) -> str:
UpperCAmelCase_ : List[str] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ) ,3 ,self.vocab_size )
UpperCAmelCase_ : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) ,dtype=np.int64 )) ,-1 )
UpperCAmelCase_ : List[str] = shift_tokens_right(lowerCamelCase_ ,1 ,2 )
UpperCAmelCase_ : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_id=self.eos_token_id ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,initializer_range=self.initializer_range ,use_cache=lowerCamelCase_ ,)
UpperCAmelCase_ : List[str] = prepare_blenderbot_inputs_dict(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return config, inputs_dict
def A__ ( self: Tuple ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def A__ ( self: int ,lowerCamelCase_: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Tuple ) -> List[Any]:
UpperCAmelCase_ : int = 20
UpperCAmelCase_ : Optional[int] = model_class_name(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase_ : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : List[str] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" )
UpperCAmelCase_ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase_ : Any = model.decode(
decoder_input_ids[:, :-1] ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase_ : List[Any] = model.decode(
decoder_input_ids[:, -1:] ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : List[Any] = model.decode(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F'''Max diff is {diff}''' )
def A__ ( self: Tuple ,lowerCamelCase_: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[Any] ) -> List[Any]:
UpperCAmelCase_ : List[Any] = 20
UpperCAmelCase_ : str = model_class_name(lowerCamelCase_ )
UpperCAmelCase_ : List[str] = model.encode(inputs_dict["""input_ids"""] )
UpperCAmelCase_ , UpperCAmelCase_ : Dict = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
UpperCAmelCase_ : Any = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] ,axis=-1 ,)
UpperCAmelCase_ : Any = model.init_cache(decoder_input_ids.shape[0] ,lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,)
UpperCAmelCase_ : List[str] = model.decode(
decoder_input_ids[:, :-1] ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,past_key_values=lowerCamelCase_ ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" )
UpperCAmelCase_ : Optional[Any] = model.decode(
decoder_input_ids[:, -1:] ,lowerCamelCase_ ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=lowerCamelCase_ ,decoder_position_ids=lowerCamelCase_ ,)
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ,lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 ,msg=F'''Max diff is {diff}''' )
@require_flax
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
A__ : int = 99
def A__ ( self: int ) -> Any:
UpperCAmelCase_ : List[str] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] ,dtype=np.int64 ,)
UpperCAmelCase_ : str = input_ids.shape[0]
UpperCAmelCase_ : int = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=24 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=32 ,decoder_ffn_dim=32 ,max_position_embeddings=48 ,eos_token_id=2 ,pad_token_id=1 ,bos_token_id=0 ,)
return config, input_ids, batch_size
def A__ ( self: int ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self._get_config_and_data()
UpperCAmelCase_ : List[str] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = lm_model(input_ids=lowerCamelCase_ )
UpperCAmelCase_ : str = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["""logits"""].shape ,lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size ,d_model=14 ,encoder_layers=2 ,decoder_layers=2 ,encoder_attention_heads=2 ,decoder_attention_heads=2 ,encoder_ffn_dim=8 ,decoder_ffn_dim=8 ,max_position_embeddings=48 ,)
UpperCAmelCase_ : Optional[int] = FlaxBlenderbotSmallForConditionalGeneration(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] ,dtype=np.int64 )
UpperCAmelCase_ : Union[str, Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] ,dtype=np.int64 )
UpperCAmelCase_ : Any = lm_model(input_ids=lowerCamelCase_ ,decoder_input_ids=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["""logits"""].shape ,lowerCamelCase_ )
def A__ ( self: List[Any] ) -> Optional[int]:
UpperCAmelCase_ : Optional[int] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] ,dtype=np.int64 )
UpperCAmelCase_ : int = shift_tokens_right(lowerCamelCase_ ,1 ,2 )
UpperCAmelCase_ : str = np.equal(lowerCamelCase_ ,1 ).astype(np.float32 ).sum()
UpperCAmelCase_ : List[str] = np.equal(lowerCamelCase_ ,1 ).astype(np.float32 ).sum()
self.assertEqual(shifted.shape ,input_ids.shape )
self.assertEqual(lowerCamelCase_ ,n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] ,2 ).all() )
@require_flax
class _snake_case ( __snake_case , unittest.TestCase , __snake_case ):
'''simple docstring'''
A__ : Optional[Any] = True
A__ : Any = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A__ : Dict = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def A__ ( self: int ) -> Tuple:
UpperCAmelCase_ : Union[str, Any] = FlaxBlenderbotSmallModelTester(self )
def A__ ( self: List[Any] ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Optional[int] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
def A__ ( self: Dict ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : int = self._prepare_for_class(lowerCamelCase_ ,lowerCamelCase_ )
UpperCAmelCase_ : Any = model_class(lowerCamelCase_ )
@jax.jit
def encode_jitted(lowerCamelCase_: Tuple ,lowerCamelCase_: Tuple=None ,**lowerCamelCase_: Union[str, Any] ):
return model.encode(input_ids=lowerCamelCase_ ,attention_mask=lowerCamelCase_ )
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase_ : Tuple = encode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase_ : List[str] = encode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) ,len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ ,lowerCamelCase_ ):
self.assertEqual(jitted_output.shape ,output.shape )
def A__ ( self: Any ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ : List[str] = model_class(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] )
UpperCAmelCase_ : int = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCamelCase_: List[Any] ,lowerCamelCase_: Dict ,lowerCamelCase_: Dict ):
return model.decode(
decoder_input_ids=lowerCamelCase_ ,decoder_attention_mask=lowerCamelCase_ ,encoder_outputs=lowerCamelCase_ ,)
with self.subTest("""JIT Enabled""" ):
UpperCAmelCase_ : Any = decode_jitted(**lowerCamelCase_ ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
UpperCAmelCase_ : Optional[Any] = decode_jitted(**lowerCamelCase_ ).to_tuple()
self.assertEqual(len(lowerCamelCase_ ) ,len(lowerCamelCase_ ) )
for jitted_output, output in zip(lowerCamelCase_ ,lowerCamelCase_ ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def A__ ( self: Dict ) -> Tuple:
for model_class_name in self.all_model_classes:
UpperCAmelCase_ : Any = model_class_name.from_pretrained("""facebook/blenderbot_small-90M""" )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
UpperCAmelCase_ : Tuple = np.ones((1, 1) ) * model.config.eos_token_id
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
| 345 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _snake_case ( __snake_case , __snake_case , unittest.TestCase ):
'''simple docstring'''
A__ : Dict = AutoencoderKL
A__ : Optional[int] = "sample"
A__ : Tuple = 1E-2
@property
def A__ ( self: List[Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : str = 3
UpperCAmelCase_ : Any = (32, 32)
UpperCAmelCase_ : Optional[int] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase_ )
return {"sample": image}
@property
def A__ ( self: List[str] ) -> Tuple:
return (3, 32, 32)
@property
def A__ ( self: Optional[Any] ) -> Any:
return (3, 32, 32)
def A__ ( self: Any ) -> Tuple:
UpperCAmelCase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
UpperCAmelCase_ : int = self.dummy_input
return init_dict, inputs_dict
def A__ ( self: Optional[Any] ) -> int:
pass
def A__ ( self: str ) -> Any:
pass
@unittest.skipIf(torch_device == """mps""" ,"""Gradient checkpointing skipped on MPS""" )
def A__ ( self: Union[str, Any] ) -> Dict:
# enable deterministic behavior for gradient checkpointing
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase_ : List[Any] = self.model_class(**lowerCamelCase_ )
model.to(lowerCamelCase_ )
assert not model.is_gradient_checkpointing and model.training
UpperCAmelCase_ : Optional[Any] = model(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
UpperCAmelCase_ : Any = torch.randn_like(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
UpperCAmelCase_ : str = self.model_class(**lowerCamelCase_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(lowerCamelCase_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
UpperCAmelCase_ : Optional[int] = model_a(**lowerCamelCase_ ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
UpperCAmelCase_ : Dict = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
UpperCAmelCase_ : Dict = dict(model.named_parameters() )
UpperCAmelCase_ : Union[str, Any] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data ,named_params_a[name].grad.data ,atol=5e-5 ) )
def A__ ( self: Optional[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : int = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" ,output_loading_info=lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
self.assertEqual(len(loading_info["""missing_keys"""] ) ,0 )
model.to(lowerCamelCase_ )
UpperCAmelCase_ : Dict = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def A__ ( self: Optional[int] ) -> int:
UpperCAmelCase_ : Dict = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
UpperCAmelCase_ : Tuple = model.to(lowerCamelCase_ )
model.eval()
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.manual_seed(0 )
else:
UpperCAmelCase_ : Optional[int] = torch.Generator(device=lowerCamelCase_ ).manual_seed(0 )
UpperCAmelCase_ : str = torch.randn(
1 ,model.config.in_channels ,model.config.sample_size ,model.config.sample_size ,generator=torch.manual_seed(0 ) ,)
UpperCAmelCase_ : int = image.to(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ,generator=lowerCamelCase_ ).sample
UpperCAmelCase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
UpperCAmelCase_ : Tuple = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
UpperCAmelCase_ : List[str] = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,rtol=1e-2 ) )
@slow
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self: Any ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
return F'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def A__ ( self: Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self: List[str] ,lowerCamelCase_: Optional[int]=0 ,lowerCamelCase_: List[Any]=(4, 3, 512, 512) ,lowerCamelCase_: Optional[Any]=False ) -> Optional[int]:
UpperCAmelCase_ : Tuple = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ : Tuple = torch.from_numpy(load_hf_numpy(self.get_file_format(lowerCamelCase_ ,lowerCamelCase_ ) ) ).to(lowerCamelCase_ ).to(lowerCamelCase_ )
return image
def A__ ( self: List[Any] ,lowerCamelCase_: List[str]="CompVis/stable-diffusion-v1-4" ,lowerCamelCase_: Union[str, Any]=False ) -> Any:
UpperCAmelCase_ : Optional[Any] = """fp16""" if fpaa else None
UpperCAmelCase_ : str = torch.float16 if fpaa else torch.float32
UpperCAmelCase_ : int = AutoencoderKL.from_pretrained(
lowerCamelCase_ ,subfolder="""vae""" ,torch_dtype=lowerCamelCase_ ,revision=lowerCamelCase_ ,)
model.to(lowerCamelCase_ ).eval()
return model
def A__ ( self: Dict ,lowerCamelCase_: Union[str, Any]=0 ) -> Optional[int]:
if torch_device == "mps":
return torch.manual_seed(lowerCamelCase_ )
return torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: List[Any] ,lowerCamelCase_: Optional[Any] ,lowerCamelCase_: str ,lowerCamelCase_: Dict ) -> Tuple:
UpperCAmelCase_ : List[Any] = self.get_sd_vae_model()
UpperCAmelCase_ : int = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Tuple = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: List[str] ) -> Tuple:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,fpaa=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(lowerCamelCase_ ,generator=lowerCamelCase_ ,sample_posterior=lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : Optional[int] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def A__ ( self: Tuple ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Optional[int] ,lowerCamelCase_: List[str] ) -> Dict:
UpperCAmelCase_ : Optional[int] = self.get_sd_vae_model()
UpperCAmelCase_ : Dict = self.get_sd_image(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : str = model(lowerCamelCase_ ).sample
assert sample.shape == image.shape
UpperCAmelCase_ : List[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
UpperCAmelCase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: Optional[Any] ,lowerCamelCase_: Tuple ,lowerCamelCase_: str ) -> Optional[Any]:
UpperCAmelCase_ : List[str] = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[int] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : str = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : Any = sample[-1, -2:, :2, -2:].flatten().cpu()
UpperCAmelCase_ : Union[str, Any] = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def A__ ( self: str ,lowerCamelCase_: List[Any] ,lowerCamelCase_: Any ) -> Optional[Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
UpperCAmelCase_ : str = sample[-1, -2:, :2, -2:].flatten().float().cpu()
UpperCAmelCase_ : str = torch.tensor(lowerCamelCase_ )
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: List[Any] ,lowerCamelCase_: Union[str, Any] ) -> int:
UpperCAmelCase_ : Optional[Any] = self.get_sd_vae_model(fpaa=lowerCamelCase_ )
UpperCAmelCase_ : List[str] = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) ,fpaa=lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : List[str] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason="""xformers is not required when using PyTorch 2.0.""" )
def A__ ( self: Optional[Any] ,lowerCamelCase_: Dict ) -> Union[str, Any]:
UpperCAmelCase_ : Tuple = self.get_sd_vae_model()
UpperCAmelCase_ : Any = self.get_sd_image(lowerCamelCase_ ,shape=(3, 4, 64, 64) )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model.decode(lowerCamelCase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
UpperCAmelCase_ : Optional[Any] = model.decode(lowerCamelCase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def A__ ( self: Union[str, Any] ,lowerCamelCase_: Any ,lowerCamelCase_: Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ : Dict = self.get_sd_vae_model()
UpperCAmelCase_ : Optional[Any] = self.get_sd_image(lowerCamelCase_ )
UpperCAmelCase_ : str = self.get_generator(lowerCamelCase_ )
with torch.no_grad():
UpperCAmelCase_ : int = model.encode(lowerCamelCase_ ).latent_dist
UpperCAmelCase_ : Optional[Any] = dist.sample(generator=lowerCamelCase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
UpperCAmelCase_ : Tuple = sample[0, -1, -3:, -3:].flatten().cpu()
UpperCAmelCase_ : Optional[Any] = torch.tensor(lowerCamelCase_ )
UpperCAmelCase_ : List[Any] = 3e-3 if torch_device != """mps""" else 1e-2
assert torch_all_close(lowerCamelCase_ ,lowerCamelCase_ ,atol=lowerCamelCase_ )
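# A tiny numeric illustration of the latent-shape assertion above: the SD VAE
# downsamples height and width by a factor of 8 and uses 4 latent channels.
image_shape = (4, 3, 512, 512)  # (batch, channels, height, width)
latent_shape = [image_shape[0], 4] + [i // 8 for i in image_shape[2:]]
assert latent_shape == [4, 4, 64, 64]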
| 345 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase_ )
class __a ( lowerCamelCase_ ):
__snake_case : str = field(default="""audio-classification""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
__snake_case : ClassVar[Features] = Features({"""audio""": Audio()} )
__snake_case : ClassVar[Features] = Features({"""labels""": ClassLabel} )
__snake_case : str = "audio"
__snake_case : str = "labels"
def A ( self : Optional[Any] , UpperCAmelCase : List[str] ):
if self.label_column not in features:
raise ValueError(F'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , lowerCAmelCase__ ):
raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
lowerCAmelCase_ : List[str] = copy.deepcopy(self )
lowerCAmelCase_ : str = self.label_schema.copy()
lowerCAmelCase_ : Any = features[self.label_column]
lowerCAmelCase_ : List[str] = label_schema
return task_template
@property
def A ( self : Dict ):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
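# align_with_features in miniature (plain dicts stand in for the Features
# objects above): copy the template, then swap the generic ClassLabel
# placeholder for the dataset's concrete label feature.
import copy

template_schema = {"labels": "ClassLabel"}                 # placeholder
dataset_features = {"labels": ["negative", "positive"]}    # concrete feature
aligned = copy.deepcopy(template_schema)
aligned["labels"] = dataset_features["labels"]
assert aligned["labels"] == ["negative", "positive"]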
| 365 |
def __UpperCamelCase ( lowercase__ : str ) -> bool:
'''simple docstring'''
lowerCAmelCase_ : Any = 0
for ch in input_str:
lowerCAmelCase_ : Any = ord(lowercase__ )
lowerCAmelCase_ : Dict = pow(2 , lowercase__ )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
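# For comparison, the same uniqueness test via a set: a string has all-unique
# characters iff its character set is as long as the string itself.
def all_unique(s: str) -> bool:
    return len(set(s)) == len(s)

assert all_unique("abc") and not all_unique("aba")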
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 | 0 |
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int , __UpperCamelCase : int ) -> str:
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
SCREAMING_SNAKE_CASE__ = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ = str(bin(__UpperCamelCase ) )[2:] # remove the leading "0b"
SCREAMING_SNAKE_CASE__ = max(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCamelCase ) , b_binary.zfill(__UpperCamelCase ) ) )
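# Worked example of the bitwise XOR above: 0b1010 ^ 0b0110 == 0b1100, i.e. 10 ^ 6 == 12.
assert 10 ^ 6 == 12
assert bin(10 ^ 6) == "0b1100"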
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 | from datetime import datetime
import requests
from bs4 import BeautifulSoup
if __name__ == "__main__":
__lowerCamelCase : str = input('''Enter image url: ''').strip()
print(F"""Downloading image from {url} ...""")
__lowerCamelCase : Any = BeautifulSoup(requests.get(url).content, '''html.parser''')
# The image URL is in the content field of the first meta tag with property og:image
__lowerCamelCase : List[Any] = soup.find('''meta''', {'''property''': '''og:image'''})['''content''']
__lowerCamelCase : Tuple = requests.get(image_url).content
__lowerCamelCase : Union[str, Any] = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, '''wb''') as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 219 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
UpperCAmelCase__ : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( _UpperCAmelCase ):
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ):
super().__init__()
self.register_modules(unet=lowercase_ , scheduler=lowercase_ )
@torch.no_grad()
def __call__( self , UpperCAmelCase_ = 1 , UpperCAmelCase_ = 100 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , ):
if audio_length_in_s is None:
lowerCamelCase =self.unet.config.sample_size / self.unet.config.sample_rate
lowerCamelCase =audio_length_in_s * self.unet.config.sample_rate
lowerCamelCase =2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it\'s bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
lowerCamelCase =int(lowercase_ )
if sample_size % down_scale_factor != 0:
lowerCamelCase =(
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
""" process.""" )
lowerCamelCase =int(lowercase_ )
lowerCamelCase =next(iter(self.unet.parameters() ) ).dtype
lowerCamelCase =(batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowerCamelCase =randn_tensor(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
# set step values
self.scheduler.set_timesteps(lowercase_ , device=audio.device )
lowerCamelCase =self.scheduler.timesteps.to(lowercase_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowerCamelCase =self.unet(lowercase_ , lowercase_ ).sample
# 2. compute previous image: x_t -> t_t-1
lowerCamelCase =self.scheduler.step(lowercase_ , lowercase_ , lowercase_ ).prev_sample
lowerCamelCase =audio.clamp(-1 , 1 ).float().cpu().numpy()
lowerCamelCase =audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowercase_ )
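# Standalone check of the rounding logic above: a requested sample size is
# rounded up to the next multiple of the down-scale factor so that every UNet
# down block halves the signal cleanly. Values are illustrative.
sample_size, down_scale_factor = 1001, 8
if sample_size % down_scale_factor != 0:
    sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor
assert sample_size == 1008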
| 371 |
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowercase ( _UpperCAmelCase = "isbn/0140328726" ) -> dict:
lowerCamelCase =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowerCamelCase =F"""{olid} is not a valid Open Library olid"""
raise ValueError(_UpperCAmelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
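# Standalone check of the olid validation above: after stripping whitespace and
# slashes, a valid identifier contains exactly one internal "/" (e.g. "isbn/0140328726").
olid = " /isbn/0140328726/ "
new_olid = olid.strip().strip("/")
assert new_olid.count("/") == 1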
def _lowercase ( _UpperCAmelCase ) -> dict:
lowerCamelCase ={
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowerCamelCase ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCamelCase =[
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowerCamelCase =data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =""", """.join(_UpperCAmelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
UpperCAmelCase__ : List[str] =input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
UpperCAmelCase__ : Dict =summarize_book(get_openlibrary_data(F"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
| 262 | 0 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
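# A compact standalone version of the shortest-edge resize rule above: scale the
# image so its shorter side matches `shortest_edge` while preserving the aspect ratio.
def shortest_edge_resize(h: int, w: int, shortest_edge: int):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert shortest_edge_resize(400, 200, 18) == (36, 18)   # portrait
assert shortest_edge_resize(200, 400, 18) == (18, 36)   # landscape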
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 |
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least Recently Used (LRU) cache: the least recently referred key is evicted first."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be a non-negative integer.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a reference to key ``x``, evicting the least recently used key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 106 | 0 |
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
UpperCAmelCase_ : Union[str, Any] = 16
UpperCAmelCase_ : Any = 32
def _A (__a , __a = 16 , __a = "bert-base-cased" ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = AutoTokenizer.from_pretrained(__a )
SCREAMING_SNAKE_CASE_ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__a ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ : str = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__a , max_length=__a )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
SCREAMING_SNAKE_CASE_ : Tuple = datasets.map(
__a , batched=__a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=__a )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__a ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__a , padding='''max_length''' , max_length=1_28 , return_tensors='''pt''' )
return tokenizer.pad(__a , padding='''longest''' , return_tensors='''pt''' )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ : int = DataLoader(
tokenized_datasets['''train'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
SCREAMING_SNAKE_CASE_ : Any = DataLoader(
tokenized_datasets['''validation'''] , shuffle=__a , collate_fn=__a , batch_size=__a )
return train_dataloader, eval_dataloader
def _A (__a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = 0
for step, batch in enumerate(__a ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**__a )
SCREAMING_SNAKE_CASE_ : str = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = accelerator.gather(
(predictions, batch['''labels''']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(__a ) - 1:
SCREAMING_SNAKE_CASE_ : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
SCREAMING_SNAKE_CASE_ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=__a , references=__a , )
SCREAMING_SNAKE_CASE_ : List[Any] = metric.compute()
return eval_metric["accuracy"]
def _A (__a , __a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ : str = config['''lr''']
SCREAMING_SNAKE_CASE_ : int = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE_ : Any = int(config['''seed'''] )
SCREAMING_SNAKE_CASE_ : str = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE_ : List[Any] = args.model_name_or_path
set_seed(__a )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = get_dataloaders(__a , __a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ : Tuple = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ : str = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
SCREAMING_SNAKE_CASE_ : Optional[Any] = optimizer_cls(params=model.parameters() , lr=__a )
if accelerator.state.deepspeed_plugin is not None:
SCREAMING_SNAKE_CASE_ : str = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
SCREAMING_SNAKE_CASE_ : List[str] = 1
SCREAMING_SNAKE_CASE_ : List[str] = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
SCREAMING_SNAKE_CASE_ : Optional[int] = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , )
else:
SCREAMING_SNAKE_CASE_ : int = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = accelerator.prepare(
__a , __a , __a , __a , __a )
# We need to keep track of how many total steps we have iterated over
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
    # We also need to keep track of the starting epoch so files are named properly
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : int = evaluate.load('''glue''' , '''mrpc''' )
SCREAMING_SNAKE_CASE_ : Tuple = num_epochs
if args.partial_train_epoch is not None:
SCREAMING_SNAKE_CASE_ : Dict = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
SCREAMING_SNAKE_CASE_ : Optional[int] = args.resume_from_checkpoint.split('''epoch_''' )[1]
SCREAMING_SNAKE_CASE_ : int = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
SCREAMING_SNAKE_CASE_ : List[Any] = int(__a ) + 1
SCREAMING_SNAKE_CASE_ : int = evaluation_loop(__a , __a , __a , __a )
accelerator.print('''resumed checkpoint performance:''' , __a )
        accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
        accelerator.print('''resumed optimizer\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE_ : int = json.load(__a )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
SCREAMING_SNAKE_CASE_ : Dict = {}
for epoch in range(__a , __a ):
model.train()
for step, batch in enumerate(__a ):
SCREAMING_SNAKE_CASE_ : Tuple = model(**__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = outputs.loss
SCREAMING_SNAKE_CASE_ : Dict = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
SCREAMING_SNAKE_CASE_ : Dict = f'epoch_{epoch}'
SCREAMING_SNAKE_CASE_ : int = os.path.join(args.output_dir , __a )
accelerator.save_state(__a )
SCREAMING_SNAKE_CASE_ : Tuple = evaluation_loop(__a , __a , __a , __a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accuracy
SCREAMING_SNAKE_CASE_ : Optional[Any] = lr_scheduler.get_lr()[0]
SCREAMING_SNAKE_CASE_ : List[Any] = optimizer.param_groups[0]['''lr''']
SCREAMING_SNAKE_CASE_ : Optional[int] = epoch
SCREAMING_SNAKE_CASE_ : Union[str, Any] = overall_step
accelerator.print(f'epoch {epoch}:' , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , '''w''' ) as f:
json.dump(__a , __a )
def _A () -> Optional[Any]:
"""simple docstring"""
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = argparse.ArgumentParser(description='''Simple example of a training script with checkpointing.''' )
parser.add_argument(
'''--model_name_or_path''' , type=__a , default='''bert-base-cased''' , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=__a , )
parser.add_argument(
'''--output_dir''' , type=__a , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , )
parser.add_argument(
'''--resume_from_checkpoint''' , type=__a , default=__a , help='''If the training should continue from a checkpoint folder.''' , )
parser.add_argument(
'''--partial_train_epoch''' , type=__a , default=__a , help='''If passed, the training will stop after this number of epochs.''' , )
parser.add_argument(
'''--num_epochs''' , type=__a , default=2 , help='''Number of train epochs.''' , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args()
SCREAMING_SNAKE_CASE_ : str = {'''lr''': 2e-5, '''num_epochs''': args.num_epochs, '''seed''': 42, '''batch_size''': 16}
training_function(__a , __a )
if __name__ == "__main__":
main()
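
# A standalone sketch of the resume logic exercised above: the starting epoch is
# recovered from the checkpoint folder name ("epoch_<n>"), so training restarts
# at <n> + 1. Illustrative helper only, independent of the script's variables:
def starting_epoch_from_checkpoint(path: str) -> int:
    epoch_string = path.split("epoch_")[1]
    digits = ""
    for char in epoch_string:
        if char.isdigit():
            digits += char
        else:
            break
    return int(digits) + 1

assert starting_epoch_from_checkpoint("outputs/epoch_3") == 4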
| 318 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
UpperCAmelCase_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : List[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : int=None , lowercase_ : Dict=1):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = dataset
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowercase_) if n_tasks is None else n_tasks
SCREAMING_SNAKE_CASE_ : Optional[int] = n_copies
def __iter__( self : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.tokenizer(lowercase_ , padding=lowercase_ , return_tensors='''pt''')
for task in range(self.n_tasks):
for _ in range(self.n_copies):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = start_length
SCREAMING_SNAKE_CASE_ : List[Any] = eof_strings
SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer
def __call__( self : Optional[int] , lowercase_ : Any , lowercase_ : int , **lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
SCREAMING_SNAKE_CASE_ : Tuple = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
return all(lowercase_)
def _A (__a ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = re.split('''(%s)''' % '''|'''.join(__a ) , __a )
# last string should be ""
return "".join(string_list[:-2] )
def _A (__a , __a , __a , __a , __a , __a=20 , **__a ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = defaultdict(__a ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__a ) ):
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = batch['''ids'''].shape[-1]
SCREAMING_SNAKE_CASE_ : Tuple = accelerator.unwrap_model(__a ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__a , **__a )
# each task is generated batch_size times
SCREAMING_SNAKE_CASE_ : List[Any] = batch['''task_id'''].repeat(__a )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.pad_across_processes(
__a , dim=1 , pad_index=tokenizer.pad_token_id )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
SCREAMING_SNAKE_CASE_ : int = generated_tokens.cpu().numpy()
SCREAMING_SNAKE_CASE_ : Optional[Any] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__a , __a ):
gen_token_dict[task].append(__a )
SCREAMING_SNAKE_CASE_ : int = [[] for _ in range(__a )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a )
code_gens[task].append(remove_last_block(__a ) )
return code_gens
def _A () -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = HfArgumentParser(__a )
SCREAMING_SNAKE_CASE_ : List[Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
SCREAMING_SNAKE_CASE_ : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
SCREAMING_SNAKE_CASE_ : str = '''false'''
if args.num_workers is None:
SCREAMING_SNAKE_CASE_ : Optional[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
SCREAMING_SNAKE_CASE_ : Tuple = Accelerator()
set_seed(args.seed , device_specific=__a )
# Load model and tokenizer
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
SCREAMING_SNAKE_CASE_ : Dict = tokenizer.eos_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
SCREAMING_SNAKE_CASE_ : List[str] = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __a , __a )] ),
}
# Load evaluation dataset and metric
SCREAMING_SNAKE_CASE_ : Optional[int] = load_dataset('''openai_humaneval''' )
SCREAMING_SNAKE_CASE_ : str = load_metric('''code_eval''' )
SCREAMING_SNAKE_CASE_ : int = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
SCREAMING_SNAKE_CASE_ : List[str] = args.n_samples // args.batch_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = TokenizedDataset(__a , human_eval['''test'''] , n_copies=__a , n_tasks=__a )
# do not confuse args.batch_size, which is actually the num_return_sequences
SCREAMING_SNAKE_CASE_ : Optional[int] = DataLoader(__a , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
SCREAMING_SNAKE_CASE_ : Any = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(__a , __a )
SCREAMING_SNAKE_CASE_ : List[Any] = complete_code(
__a , __a , __a , __a , n_tasks=__a , batch_size=args.batch_size , **__a , )
if accelerator.is_main_process:
SCREAMING_SNAKE_CASE_ : int = []
for task in tqdm(range(__a ) ):
SCREAMING_SNAKE_CASE_ : Tuple = human_eval['''test'''][task]['''test''']
SCREAMING_SNAKE_CASE_ : Tuple = f'check({human_eval["test"][task]["entry_point"]})'
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = code_eval_metric.compute(
references=__a , predictions=__a , num_workers=args.num_workers )
print(f'Results: {pass_at_k}' )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__a , __a )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
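
# The stopping criterion the script builds, rewritten as a readable sketch (same
# logic as the class above; `start_length` is reset per batch before generation,
# as the script does through gen_kwargs). Assumes the standard
# transformers.StoppingCriteria interface returning a bool:
from transformers import StoppingCriteria

class EndOfFunction(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs) -> bool:
        # Decode only the newly generated tokens and stop once every sequence
        # in the batch contains one of the end-of-function strings.
        decoded = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        return all(any(s in d for s in self.eof_strings) for d in decoded)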
| 318 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
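
# Because both inputs are already sorted, the merge can also be done lazily in
# O(n + m) with heapq.merge instead of re-sorting the concatenation. A sketch,
# not part of the original module:
import heapq

def merge_sorted_iterables(a, b):
    return list(heapq.merge(a, b))

assert merge_sorted_iterables([1, 3, 5], [2, 4]) == [1, 2, 3, 4, 5]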
| 193 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@slow
def UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
with self.subTest(__lowerCamelCase ):
A__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
with self.subTest(__lowerCamelCase ):
A__ = AutoConfig.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase,__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in ["bert-base-cased", "bert-large-uncased"]:
A__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
A__ = FlaxBertModel.from_pretrained(__lowerCamelCase )
A__ = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCamelCase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
@slow
def UpperCamelCase ( self ):
for model_name in ["roberta-base", "roberta-large"]:
A__ = AutoTokenizer.from_pretrained(__lowerCamelCase )
A__ = FlaxRobertaModel.from_pretrained(__lowerCamelCase )
A__ = tokenizer('''Do you support jax jitted function?''',return_tensors=TensorType.JAX )
@jax.jit
def eval(**__lowerCamelCase ):
return model(**__lowerCamelCase )
eval(**__lowerCamelCase ).block_until_ready()
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,'''bert-base is not a local folder and is not a valid model identifier''' ):
A__ = FlaxAutoModel.from_pretrained('''bert-base''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
A__ = FlaxAutoModel.from_pretrained(__lowerCamelCase,revision='''aaaaaa''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(
__lowerCamelCase,'''hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack''',):
A__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def UpperCamelCase ( self ):
with self.assertRaisesRegex(__lowerCamelCase,'''Use `from_pt=True` to load this model''' ):
A__ = FlaxAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
| 193 | 1 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    """The decorators above inject mocks for builtins.open and socket.socket."""
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== assertions =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
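
# For context, a minimal server that would satisfy the mocked call sequence
# asserted above (bind/listen/accept, one recv, chunked send, shutdown, close).
# This is a plausible sketch of send_file, not the module's actual source:
import socket

def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    sock = socket.socket()
    sock.bind(("localhost", 12312))
    sock.listen(5)
    while True:
        conn, _addr = sock.accept()
        conn.recv(1024)  # wait for the client's request
        with open(filename, "rb") as in_file:
            data = in_file.read(1024)
            while data:
                conn.send(data)
                data = in_file.read(1024)
        conn.close()
        if testing:  # let the (mocked) test run exactly one iteration
            break
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()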
| 351 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
lowerCAmelCase = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class A ( A_ ):
UpperCamelCase_ : Optional[int] ='''albert'''
def __init__(self , lowerCAmelCase=3_0_0_0_0 , lowerCAmelCase=1_2_8 , lowerCAmelCase=4_0_9_6 , lowerCAmelCase=1_2 , lowerCAmelCase=1 , lowerCAmelCase=6_4 , lowerCAmelCase=1_6_3_8_4 , lowerCAmelCase=1 , lowerCAmelCase="gelu_new" , lowerCAmelCase=0 , lowerCAmelCase=0 , lowerCAmelCase=5_1_2 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1E-12 , lowerCAmelCase=0.1 , lowerCAmelCase="absolute" , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase=3 , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
__lowercase= vocab_size
__lowercase= embedding_size
__lowercase= hidden_size
__lowercase= num_hidden_layers
__lowercase= num_hidden_groups
__lowercase= num_attention_heads
__lowercase= inner_group_num
__lowercase= hidden_act
__lowercase= intermediate_size
__lowercase= hidden_dropout_prob
__lowercase= attention_probs_dropout_prob
__lowercase= max_position_embeddings
__lowercase= type_vocab_size
__lowercase= initializer_range
__lowercase= layer_norm_eps
__lowercase= classifier_dropout_prob
__lowercase= position_embedding_type
class A ( A_ ):
@property
def _A (self ):
if self.task == "multiple-choice":
__lowercase= {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowercase= {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
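
# Typical usage of the config above, as a sketch: ALBERT factorizes the embedding
# (embedding_size < hidden_size), so the token embedding matrix stays at
# vocab_size x embedding_size, and layer parameters are shared across hidden
# groups. The model below is randomly initialized, so nothing is downloaded:
from transformers import AlbertConfig, AlbertModel

config = AlbertConfig(embedding_size=128, hidden_size=768, num_hidden_layers=12, num_hidden_groups=1)
model = AlbertModel(config)
print(model.config.embedding_size, model.config.hidden_size)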
| 304 | 0 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : int , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Any=None , _lowerCAmelCase : Union[str, Any]="resnet50" , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Any=32 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : Union[str, Any]=True , ):
A = parent
A = out_indices if out_indices is not None else [4]
A = stage_names
A = out_features
A = backbone
A = batch_size
A = image_size
A = num_channels
A = use_pretrained_backbone
A = is_training
def A (self : Optional[int] ):
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = self.get_config()
return config, pixel_values
def A (self : Any ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def A (self : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Dict ):
A = TimmBackbone(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(_lowerCAmelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def A (self : Tuple ):
A = self.prepare_config_and_inputs()
A , A = config_and_inputs
A = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __UpperCAmelCase ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (TimmBackbone,) if is_torch_available() else ()
__lowerCAmelCase = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {}
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
def A (self : Tuple ):
A = TimmBackboneModelTester(self )
A = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase )
def A (self : List[str] ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A (self : Dict ):
A = """resnet18"""
A = """microsoft/resnet-18"""
A = AutoBackbone.from_pretrained(_lowerCAmelCase , use_timm_backbone=_lowerCAmelCase )
A = AutoBackbone.from_pretrained(_lowerCAmelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
A = AutoBackbone.from_pretrained(_lowerCAmelCase , use_timm_backbone=_lowerCAmelCase , out_indices=[1, 2, 3] )
A = AutoBackbone.from_pretrained(_lowerCAmelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def A (self : Dict ):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def A (self : Optional[Any] ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def A (self : str ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def A (self : Tuple ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def A (self : List[Any] ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def A (self : Optional[Any] ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def A (self : List[str] ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def A (self : int ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def A (self : int ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def A (self : Dict ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def A (self : int ):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def A (self : Optional[Any] ):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def A (self : Tuple ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def A (self : Union[str, Any] ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def A (self : Tuple ):
pass
def A (self : int ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def A (self : List[Any] ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
A = self.has_attentions
# no need to test all models as different heads yield the same functionality
A = self.all_model_classes[0]
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
A = self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase )
A = model(**_lowerCAmelCase )
A = outputs[0][-1]
# Encoder-/Decoder-only models
A = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
A = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowerCAmelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def A (self : Any ):
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(**_lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
A = copy.deepcopy(_lowerCAmelCase )
A = None
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(**_lowerCAmelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
A = copy.deepcopy(_lowerCAmelCase )
A = False
A = model_class(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
A = model(**_lowerCAmelCase )
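
# The behaviour checked above, as a short usage sketch (this one downloads real
# weights, so it is illustrative rather than part of the test suite):
import torch
from transformers import AutoBackbone

backbone = AutoBackbone.from_pretrained("microsoft/resnet-18", out_indices=[1, 2, 3])
outputs = backbone(torch.rand(1, 3, 224, 224))
for feature_map in outputs.feature_maps:  # one map per requested stage
    print(feature_map.shape)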
| 258 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_lowerCamelCase : List[str] = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_lowerCamelCase : List[Any] = '\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_lowerCamelCase : Dict = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric("cuad")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def A (self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
def A (self : Dict , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any ):
A = {prediction["""id"""]: prediction["""prediction_text"""] for prediction in predictions}
A = [
{
"""paragraphs""": [
{
"""qas""": [
{
"""answers""": [{"""text""": answer_text} for answer_text in ref["""answers"""]["""text"""]],
"""id""": ref["""id"""],
}
for ref in references
]
}
]
}
]
A = evaluate(dataset=_lowerCAmelCase , predictions=_lowerCAmelCase )
return score
| 258 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "bridgetower_vision_model"
def __init__(self , __a=7_68 , __a=12 , __a=3 , __a=16 , __a=2_88 , __a=1 , __a=1e-0_5 , __a=False , __a=True , __a=False , **__a , ) -> str:
super().__init__(**__a )
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_channels
UpperCamelCase = patch_size
UpperCamelCase = image_size
UpperCamelCase = initializer_factor
UpperCamelCase = layer_norm_eps
UpperCamelCase = stop_gradient
UpperCamelCase = share_layernorm
UpperCamelCase = remove_last_layer
@classmethod
def snake_case_ (cls , __a , **__a ) -> "PretrainedConfig":
UpperCamelCase , UpperCamelCase = cls.get_config_dict(__a , **__a )
if config_dict.get("model_type" ) == "bridgetower":
UpperCamelCase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "bridgetower_text_model"
def __init__(self , __a=5_02_65 , __a=7_68 , __a=12 , __a=12 , __a=1 , __a=30_72 , __a="gelu" , __a=0.1 , __a=0.1 , __a=5_14 , __a=1 , __a=1e-0_5 , __a=1 , __a=0 , __a=2 , __a="absolute" , __a=True , **__a , ) -> str:
super().__init__(**__a )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = initializer_factor
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = use_cache
UpperCamelCase = pad_token_id
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
@classmethod
def snake_case_ (cls , __a , **__a ) -> "PretrainedConfig":
UpperCamelCase , UpperCamelCase = cls.get_config_dict(__a , **__a )
if config_dict.get("model_type" ) == "bridgetower":
UpperCamelCase = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
F"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__a , **__a )
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "bridgetower"
def __init__(self , __a=True , __a="gelu" , __a=7_68 , __a=1 , __a=1e-0_5 , __a=False , __a="add" , __a=12 , __a=6 , __a=False , __a=False , __a=None , __a=None , **__a , ) -> Any:
# TODO: remove this once the Hub files are updated.
UpperCamelCase = kwargs.pop("text_config_dict" , __a )
UpperCamelCase = kwargs.pop("vision_config_dict" , __a )
super().__init__(**__a )
UpperCamelCase = share_cross_modal_transformer_layers
UpperCamelCase = hidden_act
UpperCamelCase = hidden_size
UpperCamelCase = initializer_factor
UpperCamelCase = layer_norm_eps
UpperCamelCase = share_link_tower_layers
UpperCamelCase = link_tower_type
UpperCamelCase = num_attention_heads
UpperCamelCase = num_hidden_layers
UpperCamelCase = tie_word_embeddings
UpperCamelCase = init_layernorm_from_vision_encoder
if text_config is None:
UpperCamelCase = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." )
if vision_config is None:
UpperCamelCase = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." )
UpperCamelCase = BridgeTowerTextConfig(**__a )
UpperCamelCase = BridgeTowerVisionConfig(**__a )
@classmethod
def snake_case_ (cls , __a , __a , **__a ) -> List[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__a )
def snake_case_ (self ) -> Dict:
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.text_config.to_dict()
UpperCamelCase = self.vision_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
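
# A sketch of composing the two sub-configs into the composite config above. The
# classmethod taking (text_config, vision_config) corresponds, under this file's
# mangled names, to transformers' BridgeTowerConfig.from_text_vision_configs:
from transformers import BridgeTowerConfig, BridgeTowerTextConfig, BridgeTowerVisionConfig

text_config = BridgeTowerTextConfig(hidden_size=768, num_hidden_layers=12)
vision_config = BridgeTowerVisionConfig(hidden_size=768, num_hidden_layers=12)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
print(sorted(config.to_dict())[:5])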
| 355 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''tiiuae/falcon-40b''': '''https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json''',
'''tiiuae/falcon-7b''': '''https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json''',
}
class _lowerCamelCase ( _lowercase ):
UpperCAmelCase_ = "falcon"
UpperCAmelCase_ = ["past_key_values"]
def __init__(self , __a=6_50_24 , __a=45_44 , __a=32 , __a=71 , __a=1e-5 , __a=0.02 , __a=True , __a=0.0 , __a=0.0 , __a=None , __a=False , __a=False , __a=True , __a=True , __a=False , __a=11 , __a=11 , **__a , ) -> Union[str, Any]:
UpperCamelCase = vocab_size
# Backward compatibility with n_embed kwarg
UpperCamelCase = kwargs.pop("n_embed" , __a )
UpperCamelCase = hidden_size if n_embed is None else n_embed
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = layer_norm_epsilon
UpperCamelCase = initializer_range
UpperCamelCase = use_cache
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = bos_token_id
UpperCamelCase = eos_token_id
UpperCamelCase = num_attention_heads if num_kv_heads is None else num_kv_heads
UpperCamelCase = alibi
UpperCamelCase = new_decoder_architecture
UpperCamelCase = multi_query # Ignored when new_decoder_architecture is True
UpperCamelCase = parallel_attn
UpperCamelCase = bias
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
@property
def snake_case_ (self ) -> Optional[int]:
return self.hidden_size // self.num_attention_heads
@property
def snake_case_ (self ) -> Dict:
return not self.alibi
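
# The two properties above derive the per-head width and whether rotary position
# embeddings are used (rotary and ALiBi are mutually exclusive). A usage sketch,
# assuming the upstream FalconConfig these mangled names correspond to:
from transformers import FalconConfig

config = FalconConfig(hidden_size=4544, num_attention_heads=71, alibi=False)
assert config.head_dim == 64  # hidden_size // num_attention_heads
assert config.rotary          # rotary == not alibi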
| 244 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
UpperCAmelCase : Optional[Any] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
UpperCAmelCase : Optional[int] = {
"squeezebert/squeezebert-uncased": 5_12,
"squeezebert/squeezebert-mnli": 5_12,
"squeezebert/squeezebert-mnli-headless": 5_12,
}
UpperCAmelCase : Dict = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
UpperCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Any = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Optional[Any] = SqueezeBertTokenizer
def __init__( self , A=None , A=None , A=True , A="[UNK]" , A="[SEP]" , A="[PAD]" , A="[CLS]" , A="[MASK]" , A=True , A=None , **A , ) -> int:
'''simple docstring'''
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
lowerCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , A ) != do_lower_case
or normalizer_state.get("""strip_accents""" , A ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , A ) != tokenize_chinese_chars
):
lowerCamelCase = getattr(A , normalizer_state.pop("""type""" ) )
lowerCamelCase = do_lower_case
lowerCamelCase = strip_accents
lowerCamelCase = tokenize_chinese_chars
lowerCamelCase = normalizer_class(**A )
lowerCamelCase = do_lower_case
def __A ( self , A , A=None ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __A ( self , A , A = None ) -> List[int]:
'''simple docstring'''
lowerCamelCase = [self.sep_token_id]
lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __A ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
lowerCamelCase = self._tokenizer.model.save(A , name=A )
return tuple(A )
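
# The segment-id scheme implemented above, spelled out as plain Python: sentence A
# (with [CLS] and [SEP]) gets type 0, sentence B (with its trailing [SEP]) gets
# type 1. Illustrative helper, not part of the tokenizer class:
def segment_ids(len_a, len_b=None):
    ids = [0] * (len_a + 2)  # [CLS] + tokens_a + [SEP]
    if len_b is not None:
        ids += [1] * (len_b + 1)  # tokens_b + [SEP]
    return ids

assert segment_ids(3) == [0, 0, 0, 0, 0]
assert segment_ids(2, 2) == [0, 0, 0, 0, 1, 1, 1]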
| 252 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Tuple = "naver-clova-ix/donut-base-finetuned-docvqa"
UpperCamelCase : Optional[int] = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
UpperCamelCase : Optional[Any] = "document_qa"
UpperCamelCase : Any = AutoProcessor
UpperCamelCase : Optional[int] = VisionEncoderDecoderModel
UpperCamelCase : Any = ["image", "text"]
UpperCamelCase : str = ["text"]
def __init__( self , *A , **A ) -> Optional[Any]:
'''simple docstring'''
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*A , **A )
def __A ( self , A , A ) -> int:
'''simple docstring'''
lowerCamelCase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowerCamelCase = task_prompt.replace("""{user_input}""" , A )
lowerCamelCase = self.pre_processor.tokenizer(
A , add_special_tokens=A , return_tensors="""pt""" ).input_ids
lowerCamelCase = self.pre_processor(A , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def __A ( self , A ) -> Optional[Any]:
'''simple docstring'''
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=A , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=A , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=A , ).sequences
def __A ( self , A ) -> int:
'''simple docstring'''
lowerCamelCase = self.pre_processor.batch_decode(A )[0]
lowerCamelCase = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
lowerCamelCase = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
lowerCamelCase = re.sub(r"""<.*?>""" , """""" , A , count=1 ).strip() # remove first task start token
        lowerCamelCase = self.pre_processor.token2json(A )
return sequence["answer"]
| 252 | 1 |
'''simple docstring'''
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]


def topological_sort(start: str, visited: list[str], sort: list[str]) -> list[str]:
    """Perform a depth-first topological sort on a directed acyclic graph."""
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertex in vertices:
            if vertex not in visited:
                sort = topological_sort(vertex, visited, sort)
    # return sort
    return sort


if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
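
# The recursive routine above appends each vertex after its neighbors, so the
# printed list comes out in reverse topological order. Kahn's algorithm is the
# usual iterative alternative and also detects cycles; a self-contained sketch:
from collections import deque

def kahn_topological_sort(graph):
    indegree = {v: 0 for v in graph}
    for neighbors in graph.values():
        for n in neighbors:
            indegree[n] += 1
    queue = deque(v for v, d in indegree.items() if d == 0)
    order = []
    while queue:
        v = queue.popleft()
        order.append(v)
        for n in graph[v]:
            indegree[n] -= 1
            if indegree[n] == 0:
                queue.append(n)
    if len(order) != len(graph):
        raise ValueError("graph has a cycle")
    return order

print(kahn_topological_sort(edges))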
| 136 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
__snake_case : Optional[int] = WebClient(token=os.environ['CI_SLACK_BOT_TOKEN'])
def __lowerCamelCase ( __snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ : Any =test_results.split(""" """ )
A__ : List[Any] =0
A__ : Optional[int] =0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
A__ : Dict =expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(__snake_case ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def __lowerCamelCase ( __snake_case : str ) -> Optional[int]:
"""simple docstring"""
A__ : Dict ={}
A__ : List[Any] =None
A__ : Any =False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""", __snake_case ):
A__ : List[str] =True
A__ : Optional[int] =line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
A__ : List[str] =line
A__ : int =False
return failures
class lowerCamelCase :
'''simple docstring'''
def __init__( self : str , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict ) -> Dict:
'''simple docstring'''
A__ : Any =title
A__ : List[Any] =doc_test_results["""time_spent"""].split(""",""" )[0]
A__ : str =doc_test_results["""success"""]
A__ : str =doc_test_results["""failures"""]
A__ : Optional[int] =self.n_success + self.n_failures
# Failures and success of the modeling tests
A__ : List[Any] =doc_test_results
@property
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
A__ : List[str] =[self._time_spent]
A__ : str =0
for time in time_spent:
A__ : Union[str, Any] =time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(lowerCAmelCase_ ) == 1:
A__ : str =[0, 0, time_parts[0]]
A__ , A__ , A__ : int =int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
A__ , A__ , A__ : Optional[int] =total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"{int(lowerCAmelCase_ )}h{int(lowerCAmelCase_ )}m{int(lowerCAmelCase_ )}s"
@property
def lowercase__ ( self : Any ) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def lowercase__ ( self : Tuple ) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
A__ : Optional[Any] =40
A__ : List[Any] ={k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )}
A__ : Union[str, Any] =""""""
for category, failures in category_failures.items():
if len(lowerCAmelCase_ ) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(lowerCAmelCase_ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
A__ : Tuple =[self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(lowerCAmelCase_ )
@staticmethod
def lowercase__ ( ) -> Any:
'''simple docstring'''
A__ : Dict =[
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(lowerCAmelCase_ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=lowerCAmelCase_ , )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
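# Illustrative note (not part of the original script): in the GitHub Actions workspace each
# downloaded artifact is a directory, so a run might yield
# {"doc_tests_gpu_test_reports": Artifact("doc_tests_gpu_test_reports")}, whose .paths entries
# are read back with retrieve_artifact() in the __main__ block below.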
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 136 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        # forwarded to the tokenizer
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the tokenizer
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
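# Minimal usage sketch (illustrative only; the checkpoint name and image path are assumptions,
# not part of this module):
#
#   from transformers import LayoutXLMProcessor
#   from PIL import Image
#
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   image = Image.open("document.png").convert("RGB")
#   encoding = processor(image, return_tensors="pt")  # OCR words/boxes come from the image processor
#   print(sorted(encoding.keys()))  # e.g. ['attention_mask', 'bbox', 'image', 'input_ids']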
| 317 |
"""simple docstring"""
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed workflow run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the parameter name defined in `get_ci_error_statistics`.
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the content of the artifacts downloaded by `get_last_daily_ci_artifacts`."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
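# Minimal usage sketch (illustrative only; the artifact name and output directory are
# assumptions, not part of the original script):
#
#   token = os.environ.get("GITHUB_TOKEN")
#   reports = get_last_daily_ci_reports(["ci_results"], output_dir="previous_ci", token=token)
#   for artifact_name, files in reports.items():
#       print(artifact_name, sorted(files))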
| 64 | 0 |
from ..utils import DummyObject, requires_backends
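# These classes are import-time placeholders for public names that require the optional
# `torch`, `transformers`, and `onnx` backends: `requires_backends` raises an informative
# ImportError as soon as a placeholder is instantiated or one of its loaders is called.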
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
| 139 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
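# To exercise a single case from this suite (sketch; the test-file path is an assumption
# based on the usual diffusers repo layout):
#
#   pytest tests/schedulers/test_scheduler_euler.py -k "test_full_loop_no_noise" -q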
| 139 | 1 |
import logging
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import arg_to_scheduler
from transformers import TrainingArguments


logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
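# Minimal usage sketch (illustrative only; the flag values are made up):
#
#   from transformers import HfArgumentParser
#
#   parser = HfArgumentParser(Seq2SeqTrainingArguments)
#   (training_args,) = parser.parse_args_into_dataclasses(
#       ["--output_dir", "out", "--predict_with_generate", "--label_smoothing", "0.1"]
#   )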
| 37 |
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 28 | 0 |
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas)) | 219 |
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Remove duplicate entries and sort alphabetically, keeping any "Overview" page first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
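# Worked example (illustrative only):
#
#   clean_doc_toc(
#       [
#           {"local": "api/pipelines/ddim", "title": "DDIM"},
#           {"local": "api/pipelines/overview", "title": "Overview"},
#           {"local": "api/pipelines/ddim", "title": "DDIM"},
#       ]
#   )
#   # -> [{"local": "api/pipelines/overview", "title": "Overview"},
#   #     {"local": "api/pipelines/ddim", "title": "DDIM"}]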
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite) | 219 | 1 |