from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
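    # Usage examples for the solver above: pass 0 for the unknown quantity and
    # read it back from the returned dict.
    print(ind_reactance(0, 10e3, 50))   # {'inductance': 0.000795774715...}
    print(ind_reactance(35e-3, 0, 50))  # {'frequency': 227.3642...}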
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})
    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
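# A minimal standalone sketch of the processor under test (input image and its
# size are illustrative; the size dict mirrors the fixtures above):
#
#     from PIL import Image
#     from transformers import DonutImageProcessor
#
#     processor = DonutImageProcessor(size={"height": 18, "width": 20})
#     pixel_values = processor(Image.new("RGB", (32, 24)), return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # expected: torch.Size([1, 3, 18, 20])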
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128_022
FR_CODE = 128_028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [2, 3, 4, 5, 6],
        )
        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
# fmt: off
UpperCAmelCase_ : Tuple = {"input_ids": [[128_022, 110_108, 397, 11, 38_272, 2_247, 124_811, 285, 18_105, 1_586, 207, 7, 39_534, 4_428, 397, 1_019, 18_105, 1_586, 207, 7, 41_337, 16_786, 241, 7, 20_214, 17, 125_690, 10_398, 7, 44_378, 58_069, 68_342, 7_798, 7_343, 11, 299, 33_310, 4, 158, 37_350, 94_077, 4_569, 299, 33_310, 90, 4, 52_840, 290, 4, 31_270, 112, 299, 682, 4, 52_840, 39_953, 14_079, 193, 52_519, 90_894, 17_894, 120_697, 11, 40_445, 551, 17, 1_019, 52_519, 90_894, 17_756, 963, 11, 40_445, 480, 17, 9_792, 1_120, 5_173, 1_393, 6_240, 16_786, 241, 120_996, 28, 1_245, 1_393, 118_240, 11_123, 1_019, 93_612, 2_691, 10_618, 98_058, 120_409, 1_928, 279, 4, 40_683, 367, 178, 207, 1_019, 103, 103_121, 506, 65_296, 5, 2], [128_022, 21_217, 367, 117, 125_450, 128, 719, 7, 7_308, 40, 93_612, 12_669, 1_116, 16_704, 71, 17_785, 3_699, 15_592, 35, 144, 9_584, 241, 11_943, 713, 950, 799, 2_247, 88_427, 150, 149, 118_813, 120_706, 1_019, 106_906, 81_518, 28, 1_224, 22_799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128_022, 1_658, 123_311, 5_155, 5_578, 4_722, 279, 14_947, 2_366, 1_120, 1_197, 14, 1_348, 9_232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase_, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e"
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128_006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128_022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128_076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128_063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5_364, 82, 8_642, 4, 294, 47, 8, 14_028, 136, 3_286, 9_706, 6, 90_797, 6, 144_012, 162, 88_128, 30_061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128_022, 58, 4_183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128_006,
            },
        )
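# An end-to-end sketch of the tokenizer exercised above (same checkpoint as the
# integration tests; downloading the model weights is required to run it):
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en")
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     encoded = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
#     generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("fr"))
#     print(tokenizer.batch_decode(generated, skip_special_tokens=True))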
"""simple docstring"""
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    imga = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            imga[i, j] = val
    return imga
def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)  # kernel size must be odd
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
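    # A self-contained sanity check (no image file needed): on a constant patch
    # the weighted average reproduces the input, so the interior equals 0.5.
    #
    #     patch = np.full((9, 9), 0.5, dtype="float32")
    #     assert np.allclose(bilateral_filter(patch, 1.0, 1.0, 5)[2:-2, 2:-2], 0.5)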
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
# set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513

        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False

        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141

        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")
print(f"Building PyTorch model from configuration: {config}" )
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
    )
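# Example invocation (all paths are placeholders; "model.ckpt" is consistent
# with the `tf_checkpoint_path[:-10] + "vocab.txt"` slicing used above):
#
#     python convert_tapas_checkpoint.py \
#         --task WTQ \
#         --reset_position_index_per_cell \
#         --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#         --tapas_config_file ./tapas_wtq/config.json \
#         --pytorch_dump_path ./tapas_wtq_pytorch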
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
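    # Quick demonstrations of the helpers above (results verified by hand):
    print(set_bit(0b1100, 1))     # 14 (0b1110)
    print(clear_bit(0b1110, 1))   # 12 (0b1100)
    print(flip_bit(0b1010, 0))    # 11 (0b1011)
    print(is_bit_set(0b1010, 3))  # True
    print(get_bit(0b1010, 1))     # 1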
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
    main()
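# Example invocation (the training script name is a placeholder; it must
# expose the `_mp_fn` entry point imported above):
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --foo bar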
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
    main()
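# Example invocation (entry-point name taken from the usage string above):
#
#     diffusers-cli env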
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
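# With the mapping above, a statement like
#     from transformers.models.mbart import MBartForConditionalGeneration
# is resolved lazily through _LazyModule, so the torch-backed module is only
# imported on first attribute access.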
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
    def __init__(self, prefix_length: int, prefix_inner_dim: int, prefix_hidden_dim: Optional[int] = None, vocab_size: int = 50_257, n_positions: int = 1_024, n_embd: int = 768, n_layer: int = 12, n_head: int = 12, n_inner: Optional[int] = None, activation_function: str = "gelu_new", resid_pdrop: float = 0.1, embd_pdrop: float = 0.1, attn_pdrop: float = 0.1, layer_norm_epsilon: float = 1e-5, initializer_range: float = 0.02, scale_attn_weights: bool = True, use_cache: bool = True, scale_attn_by_inverse_layer_idx: bool = False, reorder_and_upcast_attn: bool = False):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn)
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids: torch.Tensor, prefix_embeds: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
    @torch.no_grad()
    def generate_beam(self, input_embeds=None, device=None, beam_size: int = 5, entry_length: int = 67, temperature: float = 1.0, eos_token_id: Optional[int] = None):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(tokens)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
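# A minimal instantiation sketch for the decoder above (dimensions are
# illustrative and the weights untrained, so the generated tokens are noise):
#
#     decoder = UniDiffuserTextDecoder(prefix_length=77, prefix_inner_dim=768)
#     prefix = torch.randn(1, 77, 768)
#     tokens, lengths = decoder.generate_beam(input_embeds=prefix, device="cpu", eos_token_id=50256)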
"""simple docstring"""
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('''ABCDEFG''', '''DE''') == [3]
    print(naive_pattern_search('''ABAAABCDBBABCDDEBCABC''', '''ABC'''))
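    # Worst case is O(len(s) * len(pattern)) comparisons, e.g. every window
    # failing on its last character:
    print(naive_pattern_search('''AAAAA''', '''AAB'''))  # []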
"""simple docstring"""
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
    print(F'{solution() = }')
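    # Sanity check: the semiprimes below 30 are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
    print(solution(30))  # 10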
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
'''task_specific_params''': {
'''summarization''': {'''length_penalty''': 1.0, '''max_length''': 128, '''min_length''': 12, '''num_beams''': 4},
'''summarization_cnn''': {'''length_penalty''': 2.0, '''max_length''': 142, '''min_length''': 56, '''num_beams''': 4},
'''summarization_xsum''': {'''length_penalty''': 1.0, '''max_length''': 62, '''min_length''': 11, '''num_beams''': 6},
}
}
        expected_dict = {
'''task_specific_params.summarization.length_penalty''': 1.0,
'''task_specific_params.summarization.max_length''': 128,
'''task_specific_params.summarization.min_length''': 12,
'''task_specific_params.summarization.num_beams''': 4,
'''task_specific_params.summarization_cnn.length_penalty''': 2.0,
'''task_specific_params.summarization_cnn.max_length''': 142,
'''task_specific_params.summarization_cnn.min_length''': 56,
'''task_specific_params.summarization_cnn.num_beams''': 4,
'''task_specific_params.summarization_xsum.length_penalty''': 1.0,
'''task_specific_params.summarization_xsum.max_length''': 62,
'''task_specific_params.summarization_xsum.min_length''': 11,
'''task_specific_params.summarization_xsum.num_beams''': 6,
}
        self.assertEqual(flatten_dict(input_dict), expected_dict)
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
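# The helpers exercised above dispatch on the input type, so one call works
# across frameworks, e.g. (numpy shown; torch/tf/jax tensors behave the same):
#
#     import numpy as np
#     from transformers.utils import transpose
#
#     print(transpose(np.ones((2, 3))).shape)  # (3, 2)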
"""simple docstring"""
def match_pattern(input_string: str, pattern: str) -> bool:
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for j in range(len_pattern)] for i in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = '''aab'''
    pattern = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
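# A few more checks of the matcher above:
print(match_pattern('''ab''', '''.*'''))  # True: ".*" matches any string
print(match_pattern('''aa''', '''a'''))   # False: pattern is exhausted before the string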
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/config.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/config.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/config.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/config.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/config.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/config.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json''',
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30_000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16_384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1E-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
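# A brief usage sketch (the keyword values are illustrative and describe a
# BERT-base-sized model; the defaults above correspond to albert-xxlarge):
#
#     config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3072)
#     # the config can then be passed to AlbertModel / TFAlbertModel, etc.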
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata['model_config'])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location='cpu')['module']
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab['[MASK2]'] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken('<ent>', lstrip=False, rstrip=False)
    entity_token_2 = AddedToken('<ent2>', lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(F"""Saving tokenizer to {pytorch_dump_folder_path}""")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'r') as f:
        tokenizer_config = json.load(f)
    tokenizer_config['tokenizer_class'] = 'MLukeTokenizer'
    with open(os.path.join(pytorch_dump_folder_path, 'tokenizer_config.json'), 'w') as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names['entity_vocab_file']), 'w') as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
# Initialize the embeddings of the special tokens
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(['@'] )[0]
a_ : Optional[int] = tokenizer.convert_tokens_to_ids(['#'] )[0]
a_ : Any = state_dict['embeddings.word_embeddings.weight']
a_ : Dict = word_emb[ent_init_index].unsqueeze(0 )
a_ : Dict = word_emb[enta_init_index].unsqueeze(0 )
a_ : Optional[int] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ : Union[str, Any] = state_dict[bias_name]
a_ : Tuple = decoder_bias[ent_init_index].unsqueeze(0 )
a_ : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
a_ : List[str] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ : List[str] = F"""encoder.layer.{layer_index}.attention.self."""
a_ : List[str] = state_dict[prefix + matrix_name]
a_ : Optional[Any] = state_dict[prefix + matrix_name]
a_ : Tuple = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])
    model = LukeForMaskedLM(config=config).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            new_state_dict["luke." + key] = state_dict[key]
        else:
            new_state_dict[key] = state_dict[key]
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
raise ValueError
# Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line) for line in open(entity_vocab_path)]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
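# The core trick in the conversion above is growing an embedding matrix row by
# row for newly added tokens, seeding each new row from an existing token's
# vector. A standalone sketch of that pattern with illustrative shapes (the
# names here are not from the script):
import torch

word_emb = torch.randn(10, 4)                          # (vocab_size, hidden_size)
seed_row = word_emb[3].unsqueeze(0)                    # an existing token's vector, shape (1, 4)
extended = torch.cat([word_emb, seed_row, seed_row])   # two new rows appended along dim 0
assert extended.shape == (12, 4)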
| 32 |
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
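# A hedged usage sketch for the wrapper above; the checkpoint name is
# illustrative and `generated_ids` is assumed to come from the matching RAG model:
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   inputs = tokenizer("who wrote hamlet?", return_tensors="pt")               # question-encoder side
#   answers = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)  # generator side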
| 12 | 0 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)


class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
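# The class above is an instance of a common deprecation-shim pattern: keep the
# old name importable, warn at construction time, and delegate everything to
# the replacement. A generic sketch with made-up names:
import warnings


class NewProcessor:
    def __init__(self, scale: float = 1.0) -> None:
        self.scale = scale


class OldFeatureExtractor(NewProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)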
| 191 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
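# `_LazyModule` defers the torch-dependent imports until an attribute is first
# accessed. A minimal sketch of the same idea using PEP 562's module-level
# __getattr__ (names are illustrative, and this simplifies what transformers
# actually does internally):
import importlib

_LAZY_ATTRS = {"BigBirdPegasusModel": ".modeling_bigbird_pegasus"}  # attribute -> submodule


def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")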
| 191 | 1 |
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_training_new_tokenizer(self):
        pass

    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs, from_slow=True
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Arab'''] , 2_5_6_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ace_Latn'''] , 2_5_6_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''fra_Latn'''] , 2_5_6_0_5_7 )
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [2_5_6_2_0_3, 3])
    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
@require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt"
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang]
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
@require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs), {
                # A, test, EOS, en_XX
                "input_ids": [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 2_5_6_0_5_7,
            },
        )
@require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7])
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2])
| 8 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]


class PassedArgumentsTest(unittest.TestCase):
    def test_args_convert(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
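# A hedged re-implementation of the happy-path behavior the test above asserts.
# The real `_convert_nargs_to_dict` lives in `accelerate.utils.launch` and also
# rejects the malformed flag/value mix in `fail_training_script_args`, which
# this sketch does not attempt to reproduce.
def convert_nargs_to_dict_sketch(args: list) -> dict:
    def cast(raw: str):
        if raw in ("True", "False"):
            return raw == "True"
        for caster in (int, float):
            try:
                return caster(raw)
            except ValueError:
                pass
        return raw

    result, i = {}, 0
    while i < len(args):
        key = args[i].lstrip("-")
        if i + 1 < len(args) and not args[i + 1].startswith("--"):
            result[key] = cast(args[i + 1])  # "--epochs", "3" -> {"epochs": 3}
            i += 2
        else:
            result[key] = True  # a bare flag means True
            i += 1
    return result


converted = convert_nargs_to_dict_sketch(MockLaunchConfig.success_training_script_args)
assert converted == {"model_name_or_path": "bert", "do_train": False, "epochs": 3, "learning_rate": 5e-5, "max_steps": 50.5}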
| 139 | 0 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
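# What `attribute_map` buys, as a commented usage sketch: the canonical
# `PretrainedConfig` names transparently alias the GPT-style field names.
#   config = TrajectoryTransformerConfig(n_embd=256)
#   assert config.hidden_size == 256                  # alias resolved via attribute_map
#   assert config.num_hidden_layers == config.n_layer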
| 147 |
'''simple docstring'''
from __future__ import annotations
graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
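# Each vertex is enqueued at most once, so the search runs in O(V + E), and the
# parent map reconstructs a path in time linear in the path length. For example,
# searching from "A" instead of "G" with the same adjacency list:
#   g2 = Graph(graph, "A"); g2.breath_first_search()
#   g2.shortest_path("D")  ->  "A->B->D"
#   g2.shortest_path("G")  ->  "A->C->G"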
| 147 | 1 |
from collections import deque
class Process:
    def __init__(self, process_name, arrival_time, burst_time) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time


class MLFQ:
    def __init__(self, number_of_queues, time_slices, queue, current_time) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self):
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue):
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue):
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue):
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue):
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue, time_slice):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue


if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
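# A small follow-up sketch: the per-process lists printed above reduce to
# scheduler-level averages with plain arithmetic.
#   waiting = MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])
#   turnaround = MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])
#   print("average waiting time:   ", sum(waiting) / len(waiting))
#   print("average turnaround time:", sum(turnaround) / len(turnaround))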
| 231 |
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
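# `pruning_method="topK"` keeps only the k highest-scoring entries of each
# weight matrix. In the movement-pruning project those scores are learned
# during training; the sketch below shows only the masking operation itself,
# applied to a given score tensor.
import torch


def topk_mask(scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
    # Keep the `keep_ratio` fraction of highest scores (ties may keep a few more).
    k = max(1, int(keep_ratio * scores.numel()))
    threshold = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= threshold).to(scores.dtype)


mask = topk_mask(torch.randn(4, 4).abs(), keep_ratio=0.25)
assert int(mask.sum()) >= 4  # at least k = 4 entries survive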
| 288 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileNetV2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
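# A hedged usage sketch for `post_process_semantic_segmentation`; the `outputs`
# object is assumed to come from a matching segmentation model:
#   inputs = image_processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   seg_maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
#   label_map = seg_maps[0]            # LongTensor of shape (480, 640), one class id per pixel
#   present_classes = label_map.unique()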
| 368 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels, num_stages=self.num_stages, hidden_sizes=self.hidden_sizes, depths=self.depths, is_training=self.is_training, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, out_features=self.out_features)
    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(), hidden_size=512, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=40, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, num_labels=self.num_labels)
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
@unittest.skip(reason='''UperNet does not use inputs_embeds''' )
    def test_inputs_embeds(self):
pass
@unittest.skip(reason='''UperNet does not support input and output embeddings''' )
    def test_model_common_attributes(self):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
    def test_save_load_fast_init_from_base(self):
pass
@unittest.skip(reason='''UperNet does not have a base model''' )
    def test_save_load_fast_init_to_base(self):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def test_model_is_small(self):
pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip(reason='''UperNet does not have tied weights''' )
    def test_tied_model_weights_key_ignore(self):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)
        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 52 | 0 |
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of unique prime factors of n by trial division."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoized count of unique prime factors."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check that all elements of a list are identical."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list[int]:
    """Find the first run of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int | None:
    """Return the first member of the first run of n consecutive integers
    with n distinct prime factors each."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
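# A quick sanity check of the helpers above (values are easy to verify by hand):
#
#   unique_prime_factors(100)  # -> {2, 5}
#   upf_len(100)               # -> 2
#   equality([2, 2, 2])        # -> True
#   run(2)                     # -> [14, 15], the first pair of consecutive
#                              #    integers with two distinct prime factors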
| 88 |
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'dataset': 'wikipedia', 'config_name': '20220301.de'},
{'dataset': 'wikipedia', 'config_name': '20220301.en'},
{'dataset': 'wikipedia', 'config_name': '20220301.fr'},
{'dataset': 'wikipedia', 'config_name': '20220301.frr'},
{'dataset': 'wikipedia', 'config_name': '20220301.it'},
{'dataset': 'wikipedia', 'config_name': '20220301.simple'},
{'dataset': 'snli', 'config_name': 'plain_text'},
{'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
{'dataset': 'wiki40b', 'config_name': 'en'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
{'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
{'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build the named-parameter list consumed by @parameterized below."""
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 88 | 1 |
class SubArray:
    """Maximum contiguous sub-array sum over a comma-separated string of numbers."""

    def __init__(self, arr):
        # we need a list, not a string, so split the comma-separated input
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers: ")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the result is:", re))
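# Example: for the classic Kadane test case the maximum sub-array is [4, -1, 2, 1]:
#
#   SubArray("-2,1,-3,4,-1,2,1,-5,4").solve_sub_array()  # -> 6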
| 356 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
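# Example invocation (file and script names are illustrative, not from the original):
#
#   python convert_mbart_checkpoint.py model.pt ./mbart-hf \
#       --hf_config facebook/mbart-large-cc25 --finetuned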
| 286 | 0 |
def upper(word: str) -> str:
    """Convert the string to uppercase, leaving non-lowercase characters untouched."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
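# Example: only ASCII lowercase letters are shifted; everything else passes through:
#
#   upper("wow, it works!")  # -> "WOW, IT WORKS!"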
| 253 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Give the Euclidean distance between points a and b."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify a point with a k-nearest-neighbours vote over the training data."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
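# The query point [4.4, 3.1, 1.3, 1.4] sits squarely in the setosa cluster, so the
# call above should print 'setosa'; the train/test split is random, but this point
# lies far from the other two iris classes.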
| 253 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
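# With the lazy module in place, importing the package stays cheap: the torch-backed
# classes listed in _import_structure are only materialized on first attribute access,
# so e.g. `from transformers import FocalNetConfig` never triggers the modeling import.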
| 365 |
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    """Rename HF Diffusers UNet keys to the original stable-diffusion layout."""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
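# Worked example of the composed renaming above: the resnet map rewrites the leaf
# ("norm1" -> "in_layers.0"), then the layer map rewrites the prefix, so
#   "down_blocks.0.resnets.0.norm1.weight"
# becomes
#   "input_blocks.1.0.in_layers.0.weight"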
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    """Rename HF Diffusers VAE keys to the original stable-diffusion layout."""
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('resblocks.', 'text_model.encoder.layers.'),
('ln_1', 'layer_norm1'),
('ln_2', 'layer_norm2'),
('.c_fc.', '.fc1.'),
('.c_proj.', '.fc2.'),
('.attn', '.self_attn'),
('ln_final.', 'transformer.text_model.final_layer_norm.'),
('token_embedding.weight', 'transformer.text_model.embeddings.token_embedding.weight'),
('positional_embedding', 'transformer.text_model.embeddings.position_embedding.weight'),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    """Fold the separate HF q/k/v projections back into the fused in_proj tensors SD v2 expects."""
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    # the v1 CLIP text encoder needs no renaming at all
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights using safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load weights from safetensors if present, otherwise fall back to the PyTorch bins
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify a v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
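# Example invocation (paths are illustrative; the script name is whatever this file
# is saved as):
#
#   python convert_diffusers_to_sd.py --model_path ./my-diffusers-model \
#       --checkpoint_path ./model.safetensors --half --use_safetensors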
| 221 | 0 |
from __future__ import annotations
Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    Check whether the digit n can be placed at (row, column) without
    duplicating it in the row, the column, or the 3x3 box.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """
    Find an empty location so that we can assign a number
    for that particular row and column.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Take a partially filled-in grid and assign values to all unassigned
    locations so that the Sudoku constraints hold, backtracking whenever
    a digit leads to a dead end.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """
    Print the solution in the form of a 9x9 grid.
    """
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
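# Quick check of the helpers above on `initial_grid`:
#
#   find_empty_location(initial_grid)  # -> (0, 1), the first empty cell
#   is_safe(initial_grid, 0, 1, 3)     # -> False: row 0 already contains a 3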
| 219 | import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple((re.compile(x + "$") for x in qs))
    # Return True if a window of len(qs) matches
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
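# Minimal usage sketch (a sketch only; assumes a GPT-style flax parameter tree whose
# nested keys match the rule tuples above, e.g. params["transformer"]["wte"]["embedding"]):
#
#   partition_specs = set_partitions(model_params)
#
# Any parameter whose key path matches no rule trips the "Incomplete partition spec."
# assertion, the guard against silently unpartitioned weights.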
| 219 | 1 |
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    """
    Image classification pipeline using any `AutoModelForImageClassification`.
    This pipeline predicts the class of an image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 365 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a PoolFormer model.
    """

    model_type = "poolformer"

    def __init__(
        self,
        num_channels=3,
        patch_size=16,
        stride=16,
        pool_size=3,
        mlp_ratio=4.0,
        depths=[2, 2, 6, 2],
        hidden_sizes=[64, 128, 320, 512],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        padding=[2, 1, 1, 1],
        num_encoder_blocks=4,
        drop_path_rate=0.0,
        hidden_act="gelu",
        use_layer_scale=True,
        layer_scale_init_value=1e-5,
        initializer_range=0.02,
        **kwargs,
    ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
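# Quick check of the defaults above (a sketch; assumes a transformers build that
# ships PoolFormer):
#
#   config = PoolFormerConfig()
#   config.hidden_sizes                               # -> [64, 128, 320, 512]
#   PoolFormerOnnxConfig(config).atol_for_validation  # -> 0.002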
| 73 | 0 |
from __future__ import annotations
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "MIT"
UpperCAmelCase__ = "1.0.0"
UpperCAmelCase__ = "Muhammad Umer Farooq"
UpperCAmelCase__ = "contact@muhammadumerfarooq.me"
UpperCAmelCase__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    def __init__(self, domain: str) -> None:
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """
        Parse the HTML and collect the URLs found in anchor tags.
        """
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined and is neither empty nor '#', record it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)


# Get main domain name (example.com)
def get_domain_name(url: str) -> str:
    return ".".join(get_sub_domain_name(url).split(".")[-2:])


# Get subdomain name (sub.example.com)
def get_sub_domain_name(url: str) -> str:
    return parse.urlparse(url).netloc


def emails_from_url(url: str = "https://github.com") -> list[str]:
    # Get the base domain from the url
    domain = get_domain_name(url)

    # Initialize the parser
    parser = Parser(domain)

    try:
        # Open URL
        r = requests.get(url)

        # pass the raw HTML to the parser to get links
        parser.feed(r.text)

        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link)
                # Get the valid email.
                emails = re.findall("[a-zA-Z0-9]+@" + domain, read.text)
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email)
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1)

    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails)
if __name__ == "__main__":
    emails = emails_from_url("https://github.com")
print(f"""{len(emails)} emails found:""")
print("\n".join(sorted(emails)))
| 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 284 | 0 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide number_of_bytes into the given number of partitions and return the
    byte ranges, e.g. for a multi-threaded download."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
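# Example: 100 bytes split 5 ways gives even 20-byte ranges, and any remainder is
# absorbed by the final partition:
#
#   allocation_num(100, 5)  # -> ['1-20', '21-40', '41-60', '61-80', '81-100']
#   allocation_num(103, 5)  # -> ['1-20', '21-40', '41-60', '61-80', '81-103']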
| 354 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return the prime numbers up to num using the sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
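# Example:
#
#   prime_sieve_eratosthenes(30)  # -> [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]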
| 80 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 79 |
'''simple docstring'''
import comet # From: unbabel-comet
import torch
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_DESCRIPTION = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework, the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_KWARGS_DESCRIPTION = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://unbabel.github.io/COMET/html/index.html",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "sources": datasets.Value("string", id="sequence"),
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/Unbabel/COMET"],
            reference_urls=[
                "https://github.com/Unbabel/COMET",
                "https://www.aclweb.org/anthology/2020.emnlp-main.213/",
                "http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))

    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
| 79 | 1 |
def solution(n: int = 4000000) -> int:
    """Return the sum of all even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"{solution() = }")
| 7 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 7 | 1 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
lowerCamelCase_ = "scheduler_config.json"
class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    HeunDiscreteScheduler = 6
    EulerAncestralDiscreteScheduler = 7
    DPMSolverMultistepScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    """
    Base class for the output of a scheduler's `step` function.
    """

    prev_sample: torch.FloatTensor


class SchedulerMixin:
    """
    Base class for all schedulers: `from_pretrained`/`save_pretrained` plus the
    compatibles machinery shared by every scheduler implementation.
    """

    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """
        Returns all schedulers that are compatible with this scheduler.
        """
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes | 191 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X gates and measure them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
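# Both qubits start in |0> and are flipped deterministically, so the noiseless
# simulator puts all 1000 shots in the same basis state:
#
#   single_qubit_measure(2, 2)  # -> {'11': 1000}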
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}') | 191 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
| 72 |
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
BERT_BASE_CASED = "bert-base-cased"
PEGASUS_XSUM = "google/pegasus-xsum"
ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."]
SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"]
T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"
MARIAN_TINY = "sshleifer/tiny-marian-en-de"
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    @slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch

    @parameterized.expand([BART_TINY, BERT_BASE_CASED])
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch

    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths

    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")

    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)

    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer

    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()

    @parameterized.expand(
        [
            MBART_TINY,
            MARIAN_TINY,
            T5_TINY,
            BART_TINY,
            PEGASUS_XSUM,
        ],
    )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
| 72 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
AutoencoderKL,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase ):
def __snake_case (self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self ):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
        return image

    @property
    def dummy_cond_unet(self ):
        torch.manual_seed(0 )
        model = UNet2DConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model

    @property
    def dummy_vae(self ):
        torch.manual_seed(0 )
        model = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model

    @property
    def dummy_text_encoder(self ):
        torch.manual_seed(0 )
        config = RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(config )

    @property
    def dummy_extractor(self ):
        def extract(*args , **kwargs ):
            class Out:
                def __init__(self ):
                    self.pixel_values = torch.ones([0] )

                def to(self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: List[str] = self.dummy_cond_unet
UpperCAmelCase_: List[Any] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.dummy_vae
UpperCAmelCase_: Any = self.dummy_text_encoder
UpperCAmelCase_: Optional[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
UpperCAmelCase_: Union[str, Any] = 77
UpperCAmelCase_: Optional[Any] = self.dummy_image.to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
        UpperCAmelCase_: Union[str, Any] = AltDiffusionImg2ImgPipeline(
unet=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_, vae=SCREAMING_SNAKE_CASE_, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, safety_checker=SCREAMING_SNAKE_CASE_, feature_extractor=self.dummy_extractor, )
UpperCAmelCase_: List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_: Tuple = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCAmelCase_: Any = alt_pipe(
[prompt], generator=SCREAMING_SNAKE_CASE_, guidance_scale=6.0, num_inference_steps=2, output_type="""np""", image=SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: Optional[int] = output.images
UpperCAmelCase_: Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(0 )
UpperCAmelCase_: List[str] = alt_pipe(
[prompt], generator=SCREAMING_SNAKE_CASE_, guidance_scale=6.0, num_inference_steps=2, output_type="""np""", image=SCREAMING_SNAKE_CASE_, return_dict=SCREAMING_SNAKE_CASE_, )[0]
UpperCAmelCase_: Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_: int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
        UpperCAmelCase_: Dict = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != """cuda""", """This test requires a GPU""" )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Tuple = self.dummy_cond_unet
UpperCAmelCase_: int = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = self.dummy_vae
UpperCAmelCase_: Optional[int] = self.dummy_text_encoder
UpperCAmelCase_: Optional[Any] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
UpperCAmelCase_: Any = 77
UpperCAmelCase_: Optional[int] = self.dummy_image.to(SCREAMING_SNAKE_CASE_ )
# put models in fp16
UpperCAmelCase_: str = unet.half()
UpperCAmelCase_: List[str] = vae.half()
UpperCAmelCase_: Union[str, Any] = bert.half()
# make sure here that pndm scheduler skips prk
        UpperCAmelCase_: Optional[Any] = AltDiffusionImg2ImgPipeline(
unet=SCREAMING_SNAKE_CASE_, scheduler=SCREAMING_SNAKE_CASE_, vae=SCREAMING_SNAKE_CASE_, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_, safety_checker=SCREAMING_SNAKE_CASE_, feature_extractor=self.dummy_extractor, )
UpperCAmelCase_: str = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = alt_pipe.to(SCREAMING_SNAKE_CASE_ )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = """A painting of a squirrel eating a burger"""
UpperCAmelCase_: Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase_: List[Any] = alt_pipe(
[prompt], generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type="""np""", image=SCREAMING_SNAKE_CASE_, ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""", """This test requires a GPU""" )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
UpperCAmelCase_: Any = init_image.resize((760, 504) )
UpperCAmelCase_: str = """BAAI/AltDiffusion"""
        UpperCAmelCase_: Dict = AltDiffusionImg2ImgPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_, safety_checker=SCREAMING_SNAKE_CASE_, )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Optional[Any] = """A fantasy landscape, trending on artstation"""
UpperCAmelCase_: str = torch.manual_seed(0 )
UpperCAmelCase_: str = pipe(
            prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, strength=0.75, guidance_scale=7.5, generator=SCREAMING_SNAKE_CASE_, output_type="""np""", )
UpperCAmelCase_: Union[str, Any] = output.images[0]
UpperCAmelCase_: List[str] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
        UpperCAmelCase_: Union[str, Any] = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase ):
def __snake_case (self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
UpperCAmelCase_: int = init_image.resize((768, 512) )
UpperCAmelCase_: Optional[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
UpperCAmelCase_: Any = """BAAI/AltDiffusion"""
        UpperCAmelCase_: int = AltDiffusionImg2ImgPipeline.from_pretrained(
SCREAMING_SNAKE_CASE_, safety_checker=SCREAMING_SNAKE_CASE_, )
pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
pipe.enable_attention_slicing()
UpperCAmelCase_: List[Any] = """A fantasy landscape, trending on artstation"""
UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = pipe(
            prompt=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, strength=0.75, guidance_scale=7.5, generator=SCREAMING_SNAKE_CASE_, output_type="""np""", )
UpperCAmelCase_: Optional[Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 147 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version('pytorch_lightning>=1.0.4')

MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
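# Example: passing `--lr_scheduler linear` resolves via arg_to_scheduler to
# get_linear_schedule_with_warmup; get_lr_scheduler below wraps the result in a
# {"scheduler": ..., "interval": "step", "frequency": 1} dict for Lightning.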
class BaseTransformer(pl.LightningModule ):
    def __init__(
        self ,
        hparams: argparse.Namespace ,
        num_labels=None ,
        mode="base" ,
        config=None ,
        tokenizer=None ,
        model=None ,
        **config_kwargs ,
    ):
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), f"model config doesn't have a `{p}` attribute"
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model

    def load_hf_checkpoint(self , *args , **kwargs ):
        self.model = self.model_type.from_pretrained(*args , **kwargs )

    def get_lr_scheduler(self ):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {"scheduler": scheduler , "interval": "step" , "frequency": 1}
        return scheduler
    def configure_optimizers(self ):
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check the named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()

        return [optimizer], [scheduler]
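    # The two parameter groups above implement the usual BERT-style rule:
    # biases and LayerNorm weights are exempt from weight decay, everything
    # else decays at the rate given by `--weight_decay`.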
    def test_step(self , batch , batch_nb ):
        return self.validation_step(batch , batch_nb )

    def test_epoch_end(self , outputs ):
        return self.validation_end(outputs )

    def total_steps(self ) -> int:
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
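    # Example: 1000 training examples with train_batch_size=32 and
    # accumulate_grad_batches=2 on one device give an effective batch size of
    # 64, i.e. total_steps() is (1000 / 64) * max_epochs.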
    def setup(self , stage ):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )

    def get_dataloader(self , type_path , batch_size , shuffle=False ):
        raise NotImplementedError("You must implement this for your task" )

    def train_dataloader(self ):
        return self.train_loader

    def val_dataloader(self ):
        return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=False )

    def test_dataloader(self ):
        return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=False )

    def _feature_file(self , mode ):
        return os.path.join(
            self.hparams.data_dir , "cached_{}_{}_{}".format(
                mode , list(filter(None , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self , checkpoint: Dict[str, Any] ) -> None:
        save_path = self.output_dir.joinpath("best_tfmr" )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
    @staticmethod
    def add_model_specific_args(parser , root_dir ):
        parser.add_argument(
            "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
        parser.add_argument(
            "--config_name" , default="" , type=str , help="Pretrained config name or path if not the same as model_name" )
        parser.add_argument(
            "--tokenizer_name" , default=None , type=str , help="Pretrained tokenizer name or path if not the same as model_name" , )
        parser.add_argument(
            "--cache_dir" , default=str(Path(root_dir ).parent / "test_run" / "cache" ) , type=str , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
        parser.add_argument(
            "--encoder_layerdrop" , type=float , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
        parser.add_argument(
            "--decoder_layerdrop" , type=float , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
        parser.add_argument(
            "--dropout" , type=float , help="Dropout probability (Optional). Goes into model.config" , )
        parser.add_argument(
            "--attention_dropout" , type=float , help="Attention dropout probability (Optional). Goes into model.config" , )
        parser.add_argument("--learning_rate" , default=5e-5 , type=float , help="The initial learning rate for Adam." )
        parser.add_argument(
            "--lr_scheduler" , default="linear" , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help="Learning rate scheduler" , )
        parser.add_argument("--weight_decay" , default=0.0 , type=float , help="Weight decay if we apply some." )
        parser.add_argument("--adam_epsilon" , default=1e-8 , type=float , help="Epsilon for Adam optimizer." )
        parser.add_argument("--warmup_steps" , default=0 , type=int , help="Linear warmup over warmup_steps." )
        parser.add_argument("--num_workers" , default=4 , type=int , help="kwarg passed to DataLoader" )
        parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=int )
        parser.add_argument("--train_batch_size" , default=32 , type=int )
        parser.add_argument("--eval_batch_size" , default=32 , type=int )
        parser.add_argument("--adafactor" , action="store_true" )
class InitCallback(pl.Callback ):
    def on_sanity_check_start(self , trainer , pl_module ):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # with RAY we initialize the retriever only on the master worker; in newer pytorch-lightning versions, accelerators were removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback ):
    def on_after_backward(self , trainer , pl_module ):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )


class LoggingCallback(pl.Callback ):
    def on_batch_end(self , trainer , pl_module ):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def on_validation_end(self , trainer , pl_module ):
        rank_zero_info("***** Validation results *****" )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key , str(metrics[key] ) ) )

    def on_test_end(self , trainer , pl_module ):
        rank_zero_info("***** Test results *****" )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
        with open(output_test_results_file , "w" ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key , str(metrics[key] ) ) )
                    writer.write("{} = {}\n".format(key , str(metrics[key] ) ) )
def add_generic_args(parser , root_dir ) -> None:
    """simple docstring"""
    parser.add_argument(
        "--output_dir" , default=str(Path(root_dir ).parent / "test_run" / "model_checkpoints" ) , type=str , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=str , default="O2" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=int )
    parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=float , help="Max gradient norm" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
    parser.add_argument(
        "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=int , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--seed" , type=int , default=42 , help="random seed for initialization" )
    parser.add_argument(
        "--data_dir" , default=str(Path(root_dir ).parent / "test_run" / "dummy-train-data" ) , type=str , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def generic_train(
    model: BaseTransformer ,
    args: argparse.Namespace ,
    early_stopping_callback=None ,
    logger=True ,  # can pass WandbLogger() here
    extra_callbacks=[] ,
    checkpoint_callback=None ,
    logging_callback=None ,
    **extra_train_kwargs ,
):
    """simple docstring"""
    pl.seed_everything(args.seed )

    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )

    if args.do_train:
        trainer.fit(model )
    else:
        print("RAG modeling tests with new set functions successfully executed!" )

    return trainer
| 147 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin , PretrainedConfig ):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [F'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
class SwinOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )

    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ] )

    @property
    def atol_for_validation(self ) -> float:
        return 1e-4
| 138 |
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 138 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = torch.device("cpu")
def prepare_img() -> Image.Image:
    """simple docstring"""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im


def get_expected_output(swiftformer_name: str ) -> torch.Tensor:
    """simple docstring"""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )


def rename_key(dct , old , new ) -> None:
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ) -> list:
    """simple docstring"""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv' , '.point_wise_conv' )
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv' , '.depth_wise_conv' )
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.' , '.proj.' )
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
        if "network" in k_new:
            ls = k_new.split('.' )
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
            else:
                k_new = k_new.replace('network' , 'swiftformer.encoder.network' )
        rename_keys.append((k, k_new) )
    return rename_keys
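# Example mappings produced above (illustrative): "patch_embed.0.weight" maps to
# "swiftformer.patch_embed.patch_embedding.0.weight", and "network.1.2.fc1.weight"
# maps to "swiftformer.encoder.network.1.blocks.2.fc1.weight" since ls[2] is a digit.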
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name , pytorch_dump_folder_path , original_ckpt ) -> None:
    """simple docstring"""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('https' ):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt , map_location='cpu' , check_hash=True )
        else:
            checkpoint = torch.load(original_ckpt , map_location='cpu' )
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config ).eval()
    hf_model.load_state_dict(state_dict )

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained('preprocessor_config' )
    inputs = processor(images=image , return_tensors='pt' )

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name )
    hf_logits = hf_model(inputs['pixel_values'] ).logits

    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , timm_logits , atol=1e-3 )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
    hf_model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 150 |
from math import sqrt
def is_prime(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2 , int(round(sqrt(number ) ) ) + 1 ):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status , bool ), "'status' must be from type bool"
    return status


def sieve_er(n: int ) -> list:
    assert isinstance(n , int ) and (n > 2), "'N' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2 , n + 1 ) )
    ans = []  # this list will be returned.
    # actual sieve of erathostenes
    for i in range(len(begin_list ) ):
        for j in range(i + 1 , len(begin_list ) ):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans , list ), "'ans' must be from type list"
    return ans


def get_prime_numbers(n: int ) -> list:
    assert isinstance(n , int ) and (n > 2), "'N' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2 , n + 1 ):
        if is_prime(number ):
            ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must be from type list"
    return ans
def prime_factorization(number: int ) -> list:
    assert isinstance(number , int ) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number )
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number ):
        while quotient != 1:
            if is_prime(factor ) and (quotient % factor == 0):
                ans.append(factor )
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number )
    # precondition
    assert isinstance(ans , list ), "'ans' must be from type list"
    return ans
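# Example: prime_factorization(100) returns [2, 2, 5, 5], and
# prime_factorization(97) returns [97] since 97 is prime.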
def greatest_prime_factor(number: int ) -> int:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = max(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must be from type int"
    return ans


def smallest_prime_factor(number: int ) -> int:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number )
    ans = min(prime_factors )
    # precondition
    assert isinstance(ans , int ), "'ans' must be from type int"
    return ans


def is_even(number: int ) -> bool:
    assert isinstance(number , int ), "'number' must be an int"
    assert isinstance(number % 2 == 0 , bool ), "compare must be from type bool"
    return number % 2 == 0


def is_odd(number: int ) -> bool:
    assert isinstance(number , int ), "'number' must be an int"
    assert isinstance(number % 2 != 0 , bool ), "compare must be from type bool"
    return number % 2 != 0
def goldbach(number: int ) -> list:
    assert (
        isinstance(number , int ) and (number > 2) and is_even(number )
    ), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number )
    len_pn = len(prime_numbers )
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i] )
                ans.append(prime_numbers[j] )
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans , list )
        and (len(ans ) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0] )
        and is_prime(ans[1] )
    ), "'ans' must contain two primes, and their sum must equal 'number'"
    return ans
def gcd(number1: int , number2: int ) -> int:
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1 , int ) and (
        number1 >= 0
    ), "'number' must be from type int and positive"
    return number1
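# A minimal sanity check for the Euclidean loop above; `_gcd_examples` is a
# hypothetical helper added for illustration, not part of the original module.
def _gcd_examples() -> None:
    assert gcd(24 , 36 ) == 12
    assert gcd(17 , 0 ) == 17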
def kg_v(number1: int , number2: int ) -> int:
    assert (
        isinstance(number1 , int )
        and isinstance(number2 , int )
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1 )
        prime_fac_2 = prime_factorization(number2 )
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1 , number2 )
    count1 = 0
    count2 = 0
    done = []  # captured numbers in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n )
                count2 = prime_fac_2.count(n )
                for _ in range(max(count1 , count2 ) ):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n )
                for _ in range(count1 ):
                    ans *= n
            done.append(n )
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n )
            for _ in range(count2 ):
                ans *= n
            done.append(n )
    # precondition
    assert isinstance(ans , int ) and (
        ans >= 0
    ), "'ans' must be from type int and positive"
    return ans


def get_prime(n: int ) -> int:
    assert isinstance(n , int ) and (n >= 0), "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans ):
            ans += 1
    # precondition
    assert isinstance(ans , int ) and is_prime(
        ans ), "'ans' must be a prime number and from type int"
    return ans
def get_primes_between(p_number_1: int , p_number_2: int ) -> list:
    assert (
        is_prime(p_number_1 ) and is_prime(p_number_2 ) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number ):
        number += 1
    while number < p_number_2:
        ans.append(number )
        number += 1
        # fetch the next prime number.
        while not is_prime(number ):
            number += 1
    # precondition
    assert (
        isinstance(ans , list )
        and ans[0] != p_number_1
        and ans[len(ans ) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains not 'p_number_1' and 'p_number_2' !
    return ans


def get_divisors(n: int ) -> list:
    assert isinstance(n , int ) and (n >= 1), "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1 , n + 1 ):
        if n % divisor == 0:
            ans.append(divisor )
    # precondition
    assert ans[0] == 1 and ans[len(ans ) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number: int ) -> bool:
    assert isinstance(number , int ) and (
        number > 1
    ), "'number' must be an int and >= 1"
    divisors = get_divisors(number )
    # precondition
    assert (
        isinstance(divisors , list )
        and (divisors[0] == 1)
        and (divisors[len(divisors ) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1] ) == number
def simplify_fraction(numerator: int , denominator: int ) -> tuple:
    assert (
        isinstance(numerator , int )
        and isinstance(denominator , int )
        and (denominator != 0)
    ), "The arguments must be from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator ) , abs(denominator ) )
    # precondition
    assert (
        isinstance(gcd_of_fraction , int )
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int ) -> int:
    assert isinstance(n , int ) and (n >= 0), "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1 , n + 1 ):
        ans *= factor
    return ans


def fib(n: int ) -> int:
    assert isinstance(n , int ) and (n >= 0), "'n' must be an int and >= 0"
    tmp = 0
    fib1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1 ):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
| 52 | 0 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict ):
    """simple docstring"""
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k , None )


WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict ):
    """simple docstring"""
    keys = list(s_dict.keys() )
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k , v )
        print(F'{key} -> {new_key}' )
        s_dict[new_key] = s_dict.pop(key )
    return s_dict
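# Example (following WHISPER_MAPPING above): "decoder.blocks.0.attn.query.weight"
# becomes "decoder.layers.0.self_attn.q_proj.weight".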
def make_linear_from_emb(emb ):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str , root: str = "." ) -> bytes:
    """simple docstring"""
    # NOTE: the "." default for 'root' is an assumption; pass an explicit cache directory if needed.
    os.makedirs(root , exist_ok=True )
    filename = os.path.basename(url )
    expected_sha256 = url.split("/" )[-2]
    download_target = os.path.join(root , filename )

    if os.path.exists(download_target ) and not os.path.isfile(download_target ):
        raise RuntimeError(F'{download_target} exists and is not a regular file' )

    if os.path.isfile(download_target ):
        model_bytes = open(download_target , "rb" ).read()
        if hashlib.sha256(model_bytes ).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(F'{download_target} exists, but the SHA256 checksum does not match; re-downloading the file' )

    with urllib.request.urlopen(url ) as source, open(download_target , "wb" ) as output:
        with tqdm(
            total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=True , unit_divisor=1024 ) as loop:
            while True:
                buffer = source.read(8192 )
                if not buffer:
                    break
                output.write(buffer )
                loop.update(len(buffer ) )

    model_bytes = open(download_target , "rb" ).read()
    if hashlib.sha256(model_bytes ).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model." )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path] )
    else:
        original_checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict )
    rename_keys(state_dict )
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=ffn_dim , decoder_ffn_dim=ffn_dim , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_head"] , max_source_positions=dimensions["n_audio_ctx"] , )

    model = WhisperForConditionalGeneration(config )
    missing, unexpected = model.model.load_state_dict(state_dict , strict=False )
    if len(missing ) > 0 and not set(missing ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            F' but all the following weights are missing {missing}' )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Patht to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
A : List[str] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
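    # Illustrative invocation (script name assumed):
    #   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny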
| 352 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
A : List[str] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
A : Tuple = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
else:
raise ValueError('''args.model_type should be "bert".''')
    state_dict = model.state_dict()
    compressed_sd = {}

    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[F'''distilbert.embeddings.{w}.weight'''] = state_dict[F'''{prefix}.embeddings.{w}.weight''']
    for w in ["weight", "bias"]:
        compressed_sd[F'''distilbert.embeddings.LayerNorm.{w}'''] = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
            ]
            compressed_sd[F'''distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'''] = state_dict[
                F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[F'''vocab_transform.{w}'''] = state_dict[F'''cls.predictions.transform.dense.{w}''']
            compressed_sd[F'''vocab_layer_norm.{w}'''] = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 276 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float , frequency: float , reactance: float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
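    # Example: ind_reactance(35e-6, 1500, 0) returns
    # {"reactance": 2 * pi * 1500 * 35e-6} ≈ {"reactance": 0.3299}.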
| 325 |
"""simple docstring"""
def decimal_to_binary(num: int ) -> str:
    """simple docstring"""
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
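# Quick illustrative checks (hypothetical helper, not part of the original module):
def _binary_examples() -> None:
    assert decimal_to_binary(0 ) == "0b0"
    assert decimal_to_binary(40 ) == "0b101000"
    assert decimal_to_binary(-40 ) == "-0b101000"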
if __name__ == "__main__":
import doctest
doctest.testmod() | 286 | 0 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg , hint=None ):
    """simple docstring"""
    require_version(deps[pkg] , hint )
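# Example: dep_version_check("tqdm") looks up the pinned specifier in `deps`
# (e.g. "tqdm>=4.27") and raises if the installed version does not satisfy it.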
| 213 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = UnCLIPImageVariationPipeline
__magic_name__ = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
__magic_name__ = IMAGE_VARIATION_BATCH_PARAMS
__magic_name__ = [
'generator',
'return_dict',
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
__magic_name__ = False
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 1_0_0
@property
def a_ ( self ):
snake_case = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , )
return CLIPVisionModelWithProjection(__snake_case )
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''clip_embeddings_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''cross_attention_dim''': self.cross_attention_dim,
}
snake_case = UnCLIPTextProjModel(**__snake_case )
return model
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''sample_size''': 3_2,
# RGB in channels
'''in_channels''': 3,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 6,
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': '''identity''',
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def a_ ( self ):
return {
"sample_size": 6_4,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def a_ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
snake_case = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def a_ ( self ):
snake_case = self.dummy_decoder
snake_case = self.dummy_text_proj
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_super_res_first
snake_case = self.dummy_super_res_last
snake_case = UnCLIPScheduler(
variance_type='''learned_range''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = UnCLIPScheduler(
variance_type='''fixed_small_log''' , prediction_type='''epsilon''' , num_train_timesteps=1_0_0_0 , )
snake_case = CLIPImageProcessor(crop_size=3_2 , size=3_2 )
snake_case = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def a_ ( self , __snake_case , __snake_case=0 , __snake_case=True ):
snake_case = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
if str(__snake_case ).startswith('''mps''' ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
if pil_image:
snake_case = input_image * 0.5 + 0.5
snake_case = input_image.clamp(0 , 1 )
snake_case = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
snake_case = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
snake_case = pipe(**__snake_case )
snake_case = output.images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
snake_case = pipe(
**__snake_case , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 6_4, 6_4, 3)
snake_case = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def a_ ( self ):
snake_case = torch.device('''cpu''' )
class A__ :
"""simple docstring"""
__magic_name__ = 1
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device=__snake_case ).manual_seed(0 )
snake_case = pipe.decoder.dtype
snake_case = 1
snake_case = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
snake_case = pipe.prepare_latents(
__snake_case , dtype=__snake_case , device=__snake_case , generator=__snake_case , latents=__snake_case , scheduler=DummyScheduler() )
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case ).images
snake_case = self.get_dummy_inputs(__snake_case , pil_image=__snake_case )
# Don't pass image, instead pass embedding
snake_case = pipeline_inputs.pop('''image''' )
snake_case = pipe.image_encoder(__snake_case ).image_embeds
snake_case = pipe(
**__snake_case , decoder_latents=__snake_case , super_res_latents=__snake_case , image_embeddings=__snake_case , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
snake_case = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=__snake_case , expected_max_diff=__snake_case )
@skip_mps
def a_ ( self ):
snake_case = torch_device == '''cpu'''
snake_case = True
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=__snake_case , relax_max_difference=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
def a_ ( self ):
snake_case = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
snake_case = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=__snake_case , additional_params_copy_to_batched_inputs=__snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=__snake_case )
@skip_mps
def a_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def a_ ( self ):
return super().test_save_load_local()
@skip_mps
def a_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ):
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''' )
snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''' )
snake_case = UnCLIPImageVariationPipeline.from_pretrained(
'''kakaobrain/karlo-v1-alpha-image-variations''' , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case = pipeline(
__snake_case , generator=__snake_case , output_type='''np''' , )
snake_case = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
assert_mean_pixel_difference(__snake_case , __snake_case , 1_5 )
| 213 | 1 |
"""simple docstring"""
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]
def create_inputs( input_types ):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(f"Invalid type requested: {input_type}" )
    return inputs
def output_types( outputs ):
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(f"Invalid output: {output}" )
    return output_types
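# Illustrative spot checks (added for clarity):
assert create_inputs(["text"]) == ["Text input"]
assert output_types(["some plain string"]) == ["text"]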
@is_tool_test
class ToolTesterMixin:
    """simple docstring"""
    def test_inputs_outputs( self ):
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ):
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes( self ):
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    def test_agent_types_outputs( self ):
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_types_inputs( self ):
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
| 98 | """simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self , input_ids , token_type_ids , attention_mask ):
        return None
class FuncNonContiguousArgs:
    def forward( self , input_ids , some_other_args , token_type_ids , attention_mask ):
        return None
class OnnxExportTestCase( unittest.TestCase ):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCAmelCase ,'tf' ,12 ,**__UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ) -> int:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__UpperCAmelCase ,'pt' ,12 ,**__UpperCAmelCase )
@require_torch
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import BertModel
A__ = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
with NamedTemporaryFile(mode='w+t' ) as vocab_file:
vocab_file.write('\n'.join(__UpperCAmelCase ) )
vocab_file.flush()
A__ = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
A__ = BertModel(BertConfig(vocab_size=len(__UpperCAmelCase ) ) )
model.save_pretrained(__UpperCAmelCase )
self._test_export(__UpperCAmelCase ,'pt' ,12 ,__UpperCAmelCase )
@require_tf
@slow
def snake_case__ ( self ) -> Optional[Any]:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(__UpperCAmelCase ,'tf' ,12 ,**__UpperCAmelCase )
A__ = quantize(Path(__UpperCAmelCase ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
@require_torch
@slow
def snake_case__ ( self ) -> str:
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
A__ = self._test_export(__UpperCAmelCase ,'pt' ,12 ,**__UpperCAmelCase )
A__ = quantize(__UpperCAmelCase )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(__UpperCAmelCase ).stat().st_size:
self.fail('Quantized model is bigger than initial ONNX model' )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase=None ,**__UpperCAmelCase ) -> Union[str, Any]:
try:
# Compute path
with TemporaryDirectory() as tempdir:
A__ = Path(__UpperCAmelCase ).joinpath('model.onnx' )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,**__UpperCAmelCase )
return path
except Exception as e:
self.fail(__UpperCAmelCase )
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import BertModel
A__ = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
A__ = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,'pt' )
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self ) -> Optional[Any]:
from transformers import TFBertModel
A__ = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) )
A__ = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' )
self._test_infer_dynamic_axis(__UpperCAmelCase ,__UpperCAmelCase ,'tf' )
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ) -> str:
A__ = FeatureExtractionPipeline(__UpperCAmelCase ,__UpperCAmelCase )
A__ = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
A__ , A__ , A__ , A__ = infer_shapes(__UpperCAmelCase ,__UpperCAmelCase )
# Assert all variables are present
self.assertEqual(len(__UpperCAmelCase ) ,len(__UpperCAmelCase ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] ,__UpperCAmelCase )
self.assertSequenceEqual(variable_names[3:] ,__UpperCAmelCase )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} )
self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = ['input_ids', 'attention_mask', 'token_type_ids']
A__ = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
A__ , A__ = ensure_valid_input(FuncContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__UpperCAmelCase ) ,3 )
# Should have exactly the same input names
self.assertEqual(set(__UpperCAmelCase ) ,set(__UpperCAmelCase ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__UpperCAmelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
A__ , A__ = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCAmelCase ,__UpperCAmelCase )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__UpperCAmelCase ) ,1 )
self.assertEqual(len(__UpperCAmelCase ) ,1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] ,tokens['input_ids'] )
self.assertEqual(ordered_input_names[0] ,'input_ids' )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' )
self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
| 221 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
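# Minimal usage sketch (assumes the public "openai/clip-vit-base-patch32"
# checkpoint; `image` stands for any PIL image):
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
# `batch` then holds input_ids, attention_mask and pixel_values.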
| 198 |
def remove_duplicates( key: str ) -> str:
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups
def create_cipher_map( key: str ) -> dict[str, str]:
    """simple docstring"""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher( message: str , cipher_map: dict[str, str] ) -> str:
    """simple docstring"""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def decipher( message: str , cipher_map: dict[str, str] ) -> str:
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main() -> None:
    """simple docstring"""
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
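# Illustrative round trip (added for clarity; any keyword works because the
# generated cipher map is a bijection on A-Z):
_cipher = create_cipher_map("Diffie")
assert decipher(encipher("Hello World!!", _cipher), _cipher) == "HELLO WORLD!!"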
| 198 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config( PretrainedConfig ):
    """simple docstring"""
    model_type = "swinv2"
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , **kwargs , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 35 |
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"""torch""",
"""numpy""",
"""tokenizers""",
"""filelock""",
"""requests""",
"""tqdm""",
"""regex""",
"""sentencepiece""",
"""sacremoses""",
"""importlib_metadata""",
"""huggingface_hub""",
]
@add_start_docstrings(AutoConfig.__doc__ )
def config( *args , **kwargs ):
    return AutoConfig.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoTokenizer.__doc__ )
def tokenizer( *args , **kwargs ):
    return AutoTokenizer.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModel.__doc__ )
def model( *args , **kwargs ):
    return AutoModel.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def modelForCausalLM( *args , **kwargs ):
    return AutoModelForCausalLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def modelForMaskedLM( *args , **kwargs ):
    return AutoModelForMaskedLM.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def modelForSequenceClassification( *args , **kwargs ):
    return AutoModelForSequenceClassification.from_pretrained(*args , **kwargs )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def modelForQuestionAnswering( *args , **kwargs ):
    return AutoModelForQuestionAnswering.from_pretrained(*args , **kwargs )
| 73 | 0 |
"""simple docstring"""
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right( input_ids: jnp.array , pad_token_id: int , decoder_start_token_id: int ) -> jnp.ndarray:
    shifted_input_ids = jnp.zeros_like(input_ids )
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] )
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id )
    shifted_input_ids = jnp.where(shifted_input_ids == -100 , pad_token_id , shifted_input_ids )
    return shifted_input_ids
class FlaxMTaModel(FlaxTaModel):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = "mt5"
    config_class = MTaConfig
class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = "mt5"
    config_class = MTaConfig
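# Worked example (added for illustration): the right shift builds decoder inputs
# by prepending the start token and replacing label padding (-100) with pad_token_id:
# shift_tokens_right(jnp.array([[5, -100, 1]]), pad_token_id=0, decoder_start_token_id=2)
# -> [[2, 5, 0]]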
| 359 | """simple docstring"""
def euclidean_distance_sqr( point1 , point2 ) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2
def column_based_sort( array , column=0 ) -> list:
    return sorted(array , key=lambda x: x[column] )
def dis_between_closest_pair( points , points_counts , min_dis=float("inf") ) -> float:
    for i in range(points_counts - 1 ):
        for j in range(i + 1 , points_counts ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def dis_between_closest_in_strip( points , points_counts , min_dis=float("inf") ) -> float:
    for i in range(min(6 , points_counts - 1 ) , points_counts ):
        for j in range(max(0 , i - 6 ) , i ):
            current_dis = euclidean_distance_sqr(points[i] , points[j] )
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis
def closest_pair_of_points_sqr( points_sorted_on_x , points_sorted_on_y , points_counts ) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x , points_counts )
    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x , points_sorted_on_y[:mid] , mid )
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y , points_sorted_on_y[mid:] , points_counts - mid )
    closest_pair_dis = min(closest_in_left , closest_in_right )
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
            cross_strip.append(point )
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip , len(cross_strip ) , closest_pair_dis )
    return min(closest_pair_dis , closest_in_strip )
def closest_pair_of_points( points , points_counts ) -> float:
    points_sorted_on_x = column_based_sort(points , column=0 )
    points_sorted_on_y = column_based_sort(points , column=1 )
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x , points_sorted_on_y , points_counts )
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
    print("Distance:", closest_pair_of_points(points, len(points)))
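# Cross-check sketch (illustrative): on small inputs a brute-force minimum,
#   min(euclidean_distance_sqr(p, q) for p, q in combinations(points, 2)) ** 0.5,
# agrees with closest_pair_of_points(points, len(points)); for the sample
# points above both give sqrt(2) ~ 1.41421, attained by the pair (2, 3)-(3, 4).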
| 54 | 0 |
def prefix_function( input_string: str ) -> list:
    '''simple docstring'''
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix( input_str: str ) -> int:
    '''simple docstring'''
    return max(prefix_function(input_str ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
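# Worked example (added for clarity):
assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
assert longest_prefix("aabcdaabc") == 4  # the border "aabc" is both a proper prefix and a suffix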
| 68 |
'''simple docstring'''
from __future__ import annotations
def resistor_parallel( resistors: list[float] ) -> float:
    '''simple docstring'''
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series( resistors: list[float] ) -> float:
    '''simple docstring'''
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg )
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
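# Hand-checked examples (added for illustration):
assert resistor_parallel([10, 10]) == 5.0  # 1 / (1/10 + 1/10)
assert resistor_series([5, 10, 15]) == 30  # plain sum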
| 80 | 0 |
"""simple docstring"""
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
lowerCAmelCase : List[Any] = """sshleifer/student_marian_en_ro_6_1"""
lowerCAmelCase : int = """sshleifer/tiny-mbart"""
@require_torch
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def _lowerCAmelCase ( self , _a=False , _a=None , _a=True , _a=True , _a=True , _a=True , ):
"""simple docstring"""
lowerCamelCase = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=_a , num_train_epochs=1 , distributed=_a , extra_args_str=_a , predict_with_generate=_a , do_train=_a , do_eval=_a , do_predict=_a , )
lowerCamelCase = TrainerState.load_from_json(os.path.join(_a , """trainer_state.json""" ) ).log_history
if not do_eval:
return
lowerCamelCase = [log for log in logs if """eval_loss""" in log.keys()]
lowerCamelCase = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
lowerCamelCase = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , _a )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a )
@require_torch_multi_gpu
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=_a )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=_a , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=_a )
@require_apex
@require_torch_gpu
def _lowerCAmelCase ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=_a , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=_a , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def _lowerCAmelCase ( self , _a ):
"""simple docstring"""
lowerCamelCase = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
lowerCamelCase = experiments[experiment_id]
lowerCamelCase = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
lowerCamelCase = """Running training"""
with CaptureStderr() as cl:
self.run_seqaseq_quick(**_a , extra_args_str=data["""extra_args_str"""] )
lowerCamelCase = len(re.findall(_a , cl.err ) )
self.assertEqual(_a , data["""n_matches"""] )
@slow
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=_a , learning_rate=3e-4 , num_train_epochs=10 , distributed=_a , )
# Check metrics
lowerCamelCase = TrainerState.load_from_json(os.path.join(_a , """trainer_state.json""" ) ).log_history
lowerCamelCase = [log for log in logs if """eval_loss""" in log.keys()]
lowerCamelCase = eval_metrics[0]
lowerCamelCase = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , _a )
# test if do_predict saves generations and metrics
lowerCamelCase = os.listdir(_a )
lowerCamelCase = {os.path.basename(_a ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def _lowerCAmelCase ( self ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(_a ) -> Tuple[int, float]:
lowerCamelCase = """--skip_memory_metrics 0"""
lowerCamelCase = self.run_trainer(
max_len=128 , model_name=_a , learning_rate=3e-4 , num_train_epochs=1 , optim=_a , distributed=_a , extra_args_str=_a , do_eval=_a , do_predict=_a , n_gpus_to_use=1 , )
# Check metrics
lowerCamelCase = TrainerState.load_from_json(Path(_a , """trainer_state.json""" ) ).log_history
lowerCamelCase = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
lowerCamelCase = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
lowerCamelCase = logs[0]["""train_loss"""]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
lowerCamelCase , lowerCamelCase , lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
lowerCamelCase , lowerCamelCase , lowerCamelCase = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
lowerCamelCase = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
lowerCamelCase = gpu_peak_mem_orig + gpu_alloc_mem_orig
lowerCamelCase = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
lowerCamelCase = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
lowerCamelCase = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
_a , _a , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
f' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
f' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' , )
self.assertGreater(
_a , _a , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
f' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
f' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' , )
self.assertEqual(
_a , _a , f'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def _lowerCAmelCase ( self , _a , _a , _a , _a = 3e-3 , _a = "adafactor" , _a = False , _a = None , _a = 0 , _a = True , _a = True , _a = True , _a = True , _a = None , ):
"""simple docstring"""
lowerCamelCase = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
lowerCamelCase = self.get_auto_remove_tmp_dir()
lowerCamelCase = f'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_a )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_a )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
lowerCamelCase = f'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_a )}\n '.split()
lowerCamelCase = """
--do_predict
""".split()
lowerCamelCase = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
lowerCamelCase = get_gpu_count()
lowerCamelCase = get_torch_dist_unique_port()
lowerCamelCase = f'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
lowerCamelCase = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(_a , env=self.get_env() )
else:
lowerCamelCase = ["""run_translation.py"""] + args
with patch.object(_a , """argv""" , _a ):
main()
return output_dir
| 355 |
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = 0
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = AutoImageProcessor.from_pretrained("""openai/clip-vit-base-patch32""" )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = CLIPConfig()
# Create a dummy config file with image_processor_type
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowerCamelCase = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop("""image_processor_type""" )
lowerCamelCase = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
lowerCamelCase = json.loads(config.to_json_string() )
self.assertTrue("""_processor_class""" not in dict_as_saved )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
json.dump(
{"""image_processor_type""": """CLIPImageProcessor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """clip-base is not a local folder and is not a valid model identifier""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""clip-base""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , revision="""aaaaaa""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
with self.assertRaisesRegex(
_a , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/config-no-model""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , """NewImageProcessor""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCamelCase = Path(_a ) / """preprocessor_config.json"""
lowerCamelCase = Path(_a ) / """config.json"""
json.dump(
{"""feature_extractor_type""": """CLIPFeatureExtractor""", """processor_class""": """CLIPProcessor"""} , open(_a , """w""" ) , )
json.dump({"""model_type""": """clip"""} , open(_a , """w""" ) )
lowerCamelCase = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
lowerCamelCase = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCAmelCase ( self ):
"""simple docstring"""
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = True
try:
AutoConfig.register("""custom""" , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
lowerCamelCase = AutoImageProcessor.from_pretrained("""hf-internal-testing/test_dynamic_image_processor""" )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowerCamelCase = AutoImageProcessor.from_pretrained(
"""hf-internal-testing/test_dynamic_image_processor""" , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , """NewImageProcessor""" )
self.assertTrue(not hasattr(_a , """is_local""" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 168 | 0 |
def solution( n: int = 4000000 ) -> int:
    '''simple docstring'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
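# Spot checks (added for illustration): sum of even Fibonacci terms up to the limit.
assert solution(10) == 10  # 0 + 2 + 8
assert solution(34) == 44  # 0 + 2 + 8 + 34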
| 7 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config( PretrainedConfig ):
    """simple docstring"""
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__( self , vocab_size=32128 , d_model=512 , d_kv=64 , d_ff=2048 , num_layers=6 , num_decoder_layers=None , num_heads=8 , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1E-6 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
class T5OnnxConfig( OnnxSeqaSeqConfigWithPast ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
        return common_inputs
    @property
    def default_onnx_opset( self ) -> int:
        '''simple docstring'''
        return 13
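# Hypothetical usage sketch for the config above:
# cfg = T5Config(feed_forward_proj="gated-gelu")  # backwards-compatibility path
# assert cfg.dense_act_fn == "gelu_new" and cfg.is_gated_act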
| 7 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
__SCREAMING_SNAKE_CASE : Optional[int] = HfArgumentParser(InitializationArguments)
__SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
__SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
__SCREAMING_SNAKE_CASE : Optional[Any] = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
__SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
__SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub) | 284 |
from __future__ import annotations
import requests
__SCREAMING_SNAKE_CASE : Tuple = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def snake_case (__lowercase , __lowercase = 1 , __lowercase = "new" , __lowercase = None ) -> dict:
'''simple docstring'''
_snake_case : Union[str, Any] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(__lowercase ) - valid_terms ) ):
_snake_case : List[str] = F"""Invalid search term: {invalid_search_terms}"""
raise ValueError(__lowercase )
_snake_case : Any = requests.get(
F"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
_snake_case : Optional[int] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(__lowercase )}
_snake_case : Union[str, Any] = {}
for id_ in range(__lowercase ):
_snake_case : Dict = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext'])) | 284 | 1 |
"""simple docstring"""
from math import sqrt
def snake_case_ ( A_ : int ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' must been an int and positive"
_lowerCamelCase : Optional[int] = True
# 0 and 1 are none primes.
if number <= 1:
_lowerCamelCase : Tuple = False
for divisor in range(2, int(round(sqrt(A_ ) ) ) + 1 ):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to false and break out of the loop.
if number % divisor == 0:
_lowerCamelCase : str = False
break
# precondition
assert isinstance(A_, A_ ), "'status' must been from type bool"
return status
def snake_case_ ( A_ : Any ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_lowerCamelCase : Dict = list(range(2, n + 1 ) )
    _lowerCamelCase : Dict = [] # this list will be returned.
    # actual sieve of Eratosthenes
for i in range(len(A_ ) ):
for j in range(i + 1, len(A_ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_lowerCamelCase : Optional[Any] = 0
# filters actual prime numbers.
_lowerCamelCase : str = [x for x in begin_list if x != 0]
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type list"
return ans
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n > 2), "'N' must been an int and > 2"
_lowerCamelCase : Union[str, Any] = []
    # iterates over all numbers from 2 up to N
    # if a number is prime, it is appended to list 'ans'
for number in range(2, n + 1 ):
if is_prime(A_ ):
ans.append(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type list"
return ans
def snake_case_ ( A_ : str ):
'''simple docstring'''
assert isinstance(A_, A_ ) and number >= 0, "'number' must been an int and >= 0"
    _lowerCamelCase : Tuple = [] # this list will be returned by the function.
# potential prime number factors.
_lowerCamelCase : int = 2
_lowerCamelCase : Optional[int] = number
if number == 0 or number == 1:
ans.append(A_ )
    # if 'number' is not prime then build the prime factorization of 'number'
elif not is_prime(A_ ):
while quotient != 1:
if is_prime(A_ ) and (quotient % factor == 0):
ans.append(A_ )
quotient /= factor
else:
factor += 1
else:
ans.append(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type list"
return ans
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_lowerCamelCase : Union[str, Any] = 0
# prime factorization of 'number'
_lowerCamelCase : List[Any] = prime_factorization(A_ )
_lowerCamelCase : Any = max(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type int"
return ans
def snake_case_ ( A_ : Tuple ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number >= 0
), "'number' bust been an int and >= 0"
_lowerCamelCase : Dict = 0
# prime factorization of 'number'
_lowerCamelCase : Dict = prime_factorization(A_ )
_lowerCamelCase : Optional[Any] = min(A_ )
# precondition
assert isinstance(A_, A_ ), "'ans' must been from type int"
return ans
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ), "'number' must been an int"
    assert isinstance(number % 2 == 0, A_ ), "compare must been from type bool"
return number % 2 == 0
def snake_case_ ( A_ : str ):
'''simple docstring'''
assert isinstance(A_, A_ ), "'number' must been an int"
    assert isinstance(number % 2 != 0, A_ ), "compare must been from type bool"
return number % 2 != 0
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
assert (
isinstance(A_, A_ ) and (number > 2) and is_even(A_ )
), "'number' must been an int, even and > 2"
    _lowerCamelCase : List[str] = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
_lowerCamelCase : List[Any] = get_prime_numbers(A_ )
_lowerCamelCase : str = len(A_ )
# run variable for while-loops.
_lowerCamelCase : Tuple = 0
_lowerCamelCase : Dict = None
    # exit variable, used to break out of the loops
_lowerCamelCase : Tuple = True
while i < len_pn and loop:
_lowerCamelCase : Dict = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_lowerCamelCase : Tuple = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(A_, A_ )
and (len(A_ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def snake_case_ ( A_ : Optional[int], A_ : Any ):
'''simple docstring'''
assert (
isinstance(A_, A_ )
and isinstance(A_, A_ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_lowerCamelCase : str = 0
while numbera != 0:
_lowerCamelCase : Tuple = numbera % numbera
_lowerCamelCase : int = numbera
_lowerCamelCase : Tuple = rest
# precondition
assert isinstance(A_, A_ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def snake_case_ ( A_ : Union[str, Any], A_ : str ):
'''simple docstring'''
assert (
isinstance(A_, A_ )
and isinstance(A_, A_ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_lowerCamelCase : str = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_lowerCamelCase : Any = prime_factorization(A_ )
_lowerCamelCase : int = prime_factorization(A_ )
elif numbera == 1 or numbera == 1:
_lowerCamelCase : Dict = []
_lowerCamelCase : Any = []
_lowerCamelCase : Optional[int] = max(A_, A_ )
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : Any = 0
    _lowerCamelCase : List[str] = [] # captured numbers in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_lowerCamelCase : Optional[int] = prime_fac_a.count(A_ )
_lowerCamelCase : List[str] = prime_fac_a.count(A_ )
for _ in range(max(A_, A_ ) ):
ans *= n
else:
_lowerCamelCase : int = prime_fac_a.count(A_ )
for _ in range(A_ ):
ans *= n
done.append(A_ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_lowerCamelCase : Optional[int] = prime_fac_a.count(A_ )
for _ in range(A_ ):
ans *= n
done.append(A_ )
# precondition
assert isinstance(A_, A_ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def snake_case_ ( A_ : Optional[int] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n >= 0), "'number' must been a positive int"
_lowerCamelCase : Any = 0
_lowerCamelCase : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(A_ ):
ans += 1
# precondition
assert isinstance(A_, A_ ) and is_prime(
A_ ), "'ans' must been a prime number and from type int"
return ans
def snake_case_ ( A_ : Optional[int], A_ : Optional[Any] ):
'''simple docstring'''
assert (
is_prime(A_ ) and is_prime(A_ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_lowerCamelCase : Dict = p_number_a + 1 # jump to the next number
_lowerCamelCase : Dict = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(A_ ):
number += 1
while number < p_number_a:
ans.append(A_ )
number += 1
# fetch the next prime number.
while not is_prime(A_ ):
number += 1
# precondition
assert (
isinstance(A_, A_ )
and ans[0] != p_number_a
and ans[len(A_ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
    # 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def snake_case_ ( A_ : List[Any] ):
'''simple docstring'''
    assert isinstance(A_, A_ ) and (n >= 1), "'n' must been an int and >= 1"
_lowerCamelCase : Optional[Any] = [] # will be returned.
for divisor in range(1, n + 1 ):
if n % divisor == 0:
ans.append(A_ )
# precondition
    assert ans[0] == 1 and ans[len(A_ ) - 1] == n, "Error in function getDivisors(...)"
return ans
def snake_case_ ( A_ : List[str] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (
number > 1
), "'number' must been an int and >= 1"
_lowerCamelCase : List[Any] = get_divisors(A_ )
# precondition
assert (
isinstance(A_, A_ )
and (divisors[0] == 1)
and (divisors[len(A_ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def snake_case_ ( A_ : int, A_ : str ):
'''simple docstring'''
assert (
isinstance(A_, A_ )
and isinstance(A_, A_ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_lowerCamelCase : List[Any] = gcd(abs(A_ ), abs(A_ ) )
# precondition
assert (
isinstance(A_, A_ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def snake_case_ ( A_ : Optional[Any] ):
'''simple docstring'''
    assert isinstance(A_, A_ ) and (n >= 0), "'n' must been an int and >= 0"
_lowerCamelCase : Tuple = 1 # this will be return.
for factor in range(1, n + 1 ):
ans *= factor
return ans
def snake_case_ ( A_ : Union[str, Any] ):
'''simple docstring'''
assert isinstance(A_, A_ ) and (n >= 0), "'n' must been an int and >= 0"
_lowerCamelCase : Any = 0
_lowerCamelCase : Union[str, Any] = 1
_lowerCamelCase : int = 1 # this will be return
for _ in range(n - 1 ):
_lowerCamelCase : int = ans
ans += fiba
_lowerCamelCase : Optional[Any] = tmp
return ans
| 72 |
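# A hedged usage sketch for the number-theory module above, written against the
# de-obfuscated helper names that the function bodies themselves reference
# (is_prime, get_prime_numbers, prime_factorization, gcd, get_divisors); the
# obfuscated assignment targets and duplicated parameter names would need to be
# restored for these to run:
assert is_prime(13) and not is_prime(12)
assert get_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]
assert prime_factorization(60) == [2, 2, 3, 5]
assert gcd(24, 36) == 12
assert get_divisors(28) == [1, 2, 4, 7, 14, 28]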
"""simple docstring"""
def snake_case_ ( A_ : int = 2_00_00_00 ):
'''simple docstring'''
_lowerCamelCase : int = [0 for i in range(n + 1 )]
_lowerCamelCase : List[str] = 1
_lowerCamelCase : Any = 1
for i in range(2, int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
            for j in range(i * i, n + 1, i ):
_lowerCamelCase : str = 1
_lowerCamelCase : Tuple = 0
for i in range(A_ ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 | 1 |
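# A compact, runnable sketch of the same computation (Project Euler 10: sum of
# the primes below n) using a conventional boolean sieve; the default bound
# matches the snippet above:
def sum_primes_below(n: int = 2_000_000) -> int:
    is_composite = bytearray(n)  # 0 = possibly prime, 1 = composite
    total = 0
    for i in range(2, n):
        if not is_composite[i]:
            total += i
            for j in range(i * i, n, i):
                is_composite[j] = 1
    return total
assert sum_primes_below(10) == 17  # 2 + 3 + 5 + 7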
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
_UpperCamelCase = TypeVar("""T""")
class lowerCamelCase__ ( Generic[T] ):
SCREAMING_SNAKE_CASE = 42 # Cache store of keys
SCREAMING_SNAKE_CASE = 42 # References of the keys in cache
SCREAMING_SNAKE_CASE = 10 # Maximum capacity of cache
def __init__( self ,A ):
UpperCAmelCase = deque()
UpperCAmelCase = set()
if not n:
UpperCAmelCase = sys.maxsize
elif n < 0:
raise ValueError("""n should be an integer greater than 0.""" )
else:
UpperCAmelCase = n
def _UpperCamelCase ( self ,A ):
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
UpperCAmelCase = self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def _UpperCamelCase ( self ):
for k in self.dq_store:
print(__lowercase )
def __repr__( self ):
return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'''
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_UpperCamelCase = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""MBartTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""MBartTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""MBART_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MBartForCausalLM""",
"""MBartForConditionalGeneration""",
"""MBartForQuestionAnswering""",
"""MBartForSequenceClassification""",
"""MBartModel""",
"""MBartPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""TFMBartForConditionalGeneration""",
"""TFMBartModel""",
"""TFMBartPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""FlaxMBartForConditionalGeneration""",
"""FlaxMBartForQuestionAnswering""",
"""FlaxMBartForSequenceClassification""",
"""FlaxMBartModel""",
"""FlaxMBartPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 234 | 0 |
from __future__ import annotations
import requests
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ) -> dict:
'''simple docstring'''
lowerCAmelCase : str = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
return requests.get(_UpperCAmelCase ).json()
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 10 ) -> list[dict]:
'''simple docstring'''
lowerCAmelCase : Tuple = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty'
lowerCAmelCase : List[str] = requests.get(_UpperCAmelCase ).json()[:max_stories]
return [get_hackernews_story(_UpperCAmelCase ) for story_id in story_ids]
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase = 10 ) -> str:
'''simple docstring'''
lowerCAmelCase : Any = hackernews_top_stories(_UpperCAmelCase )
return "\n".join('* [{title}]({url})'.format(**_UpperCAmelCase ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
| 138 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : List[str] = logging.get_logger(__name__)
__A : Optional[Any] = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = "vit_mae"
def __init__( self : Optional[int] , UpperCAmelCase_ : Dict=768 , UpperCAmelCase_ : int=12 , UpperCAmelCase_ : Dict=12 , UpperCAmelCase_ : Optional[Any]=3072 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[str]=0.02 , UpperCAmelCase_ : Union[str, Any]=1E-12 , UpperCAmelCase_ : Any=224 , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : int=16 , UpperCAmelCase_ : Union[str, Any]=512 , UpperCAmelCase_ : Any=8 , UpperCAmelCase_ : List[Any]=2048 , UpperCAmelCase_ : Tuple=0.75 , UpperCAmelCase_ : str=False , **UpperCAmelCase_ : str , ):
super().__init__(**UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : List[str] = num_hidden_layers
lowerCAmelCase : Union[str, Any] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : str = hidden_act
lowerCAmelCase : Optional[int] = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = initializer_range
lowerCAmelCase : Union[str, Any] = layer_norm_eps
lowerCAmelCase : Any = image_size
lowerCAmelCase : Union[str, Any] = patch_size
lowerCAmelCase : Union[str, Any] = num_channels
lowerCAmelCase : Any = qkv_bias
lowerCAmelCase : Union[str, Any] = decoder_num_attention_heads
lowerCAmelCase : Tuple = decoder_hidden_size
lowerCAmelCase : int = decoder_num_hidden_layers
lowerCAmelCase : Optional[Any] = decoder_intermediate_size
lowerCAmelCase : Union[str, Any] = mask_ratio
lowerCAmelCase : Any = norm_pix_loss
| 138 | 1 |
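# Hedged usage note: the defaults above give the ViT-MAE base geometry with a
# 75% patch-masking ratio. Assuming the obfuscated class corresponds to the
# library's ViTMAEConfig (its model_type "vit_mae" matches):
from transformers import ViTMAEConfig
config = ViTMAEConfig()
assert config.mask_ratio == 0.75
assert config.image_size // config.patch_size == 14  # 224 / 16 patches per side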
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def __snake_case ( __UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ,__UpperCamelCase : int ):
"""simple docstring"""
if (ksize % 2) == 0:
A_ = ksize + 1
A_ = np.zeros((ksize, ksize) ,dtype=np.floataa )
    # compute each kernel value
for y in range(A__ ):
for x in range(A__ ):
# distance from center
A_ = x - ksize // 2
A_ = y - ksize // 2
# degree to radiant
A_ = theta / 180 * np.pi
A_ = np.cos(_theta )
A_ = np.sin(_theta )
# get kernel x
A_ = cos_theta * px + sin_theta * py
# get kernel y
A_ = -sin_theta * px + cos_theta * py
# fill kernel
A_ = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
__a :int = imread('../image_data/lena.jpg')
# turn the image into gray-scale values
__a :str = cvtColor(img, COLOR_BGR2GRAY)
# apply multiple Gabor kernels to detect edges
__a :Optional[Any] = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
__a :Optional[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
__a :Dict = out / out.max() * 255
__a :Any = out.astype(np.uinta)
imshow('Original', gray)
imshow('Gabor filter with 20x20 mask and 6 directions', out)
waitKey(0) | 357 |
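# Hedged sanity check for the kernel construction above (assuming the snippet's
# internals are de-obfuscated so gabor_filter_kernel runs): at the kernel centre
# px = py = 0, so the value is exp(0) * cos(psi), i.e. exactly 1.0 for psi = 0.
kernel = gabor_filter_kernel(10, 8, 0, 10, 0, 0)
assert kernel.shape == (11, 11)        # an even ksize is bumped to ksize + 1
assert abs(kernel[5, 5] - 1.0) < 1e-9  # centre value: exp(0) * cos(0)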
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
__a :Optional[Any] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[str] , *UpperCAmelCase : int , **UpperCAmelCase : Optional[int] ):
warnings.warn(
"The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use VideoMAEImageProcessor instead." , UpperCAmelCase , )
super().__init__(*UpperCAmelCase , **UpperCAmelCase ) | 329 | 0 |
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
_snake_case = ["text", "image", "audio"]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = []
for input_type in input_types:
if input_type == "text":
inputs.append("Text input" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_000 ) )
elif isinstance(_lowerCamelCase , _lowerCamelCase ):
inputs.append(create_inputs(_lowerCamelCase ) )
else:
raise ValueError(F"Invalid type requested: {input_type}" )
return inputs
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = []
for output in outputs:
if isinstance(_lowerCamelCase , (str, AgentText) ):
output_types.append("text" )
elif isinstance(_lowerCamelCase , (Image.Image, AgentImage) ):
output_types.append("image" )
elif isinstance(_lowerCamelCase , (torch.Tensor, AgentAudio) ):
output_types.append("audio" )
else:
raise ValueError(F"Invalid output: {output}" )
return output_types
@is_tool_test
class UpperCAmelCase_ :
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, "inputs"))
self.assertTrue(hasattr(self.tool, "outputs"))
_lowerCAmelCase : List[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input, __a):
for __input in _input:
self.assertTrue(__input in authorized_types)
else:
self.assertTrue(_input in authorized_types)
_lowerCAmelCase : str = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = create_inputs(self.tool.inputs)
_lowerCAmelCase : Dict = self.tool(*__a)
# There is a single output
if len(self.tool.outputs) == 1:
_lowerCAmelCase : Dict = [outputs]
self.assertListEqual(output_types(__a), self.tool.outputs)
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(hasattr(self.tool, "description"))
self.assertTrue(hasattr(self.tool, "default_checkpoint"))
self.assertTrue(self.tool.description.startswith("This is a tool that"))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = create_inputs(self.tool.inputs)
_lowerCAmelCase : Any = self.tool(*__a)
if not isinstance(__a, __a):
_lowerCAmelCase : str = [outputs]
self.assertEqual(len(__a), len(self.tool.outputs))
for output, output_type in zip(__a, self.tool.outputs):
_lowerCAmelCase : Any = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(__a, __a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = create_inputs(self.tool.inputs)
_lowerCAmelCase : Tuple = []
for _input, input_type in zip(__a, self.tool.inputs):
if isinstance(__a, __a):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
# Should not raise an error
_lowerCAmelCase : Dict = self.tool(*__a)
if not isinstance(__a, __a):
_lowerCAmelCase : Any = [outputs]
self.assertEqual(len(__a), len(self.tool.outputs))
| 36 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
A__: int = logging.getLogger(__name__)
@dataclass
class A__ :
__UpperCamelCase : str
__UpperCamelCase : List[str]
__UpperCamelCase : Optional[List[str]]
@dataclass
class A__ :
__UpperCamelCase : List[int]
__UpperCamelCase : List[int]
__UpperCamelCase : Optional[List[int]] = None
__UpperCamelCase : Optional[List[int]] = None
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : str = "train"
__UpperCamelCase : Tuple = "dev"
__UpperCamelCase : str = "test"
class A__ :
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :Dict , SCREAMING_SNAKE_CASE :Union[Split, str] ) -> List[InputExample]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :str ) -> List[str]:
'''simple docstring'''
raise NotImplementedError
@staticmethod
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE :List[InputExample] , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :int , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Optional[Any]="[CLS]" , SCREAMING_SNAKE_CASE :Optional[int]=1 , SCREAMING_SNAKE_CASE :Any="[SEP]" , SCREAMING_SNAKE_CASE :List[Any]=False , SCREAMING_SNAKE_CASE :Union[str, Any]=False , SCREAMING_SNAKE_CASE :List[str]=0 , SCREAMING_SNAKE_CASE :str=0 , SCREAMING_SNAKE_CASE :Dict=-1_0_0 , SCREAMING_SNAKE_CASE :Optional[int]=0 , SCREAMING_SNAKE_CASE :Tuple=True , ) -> List[InputFeatures]:
'''simple docstring'''
_a : str ={label: i for i, label in enumerate(SCREAMING_SNAKE_CASE )}
_a : Tuple =[]
for ex_index, example in enumerate(SCREAMING_SNAKE_CASE ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" , SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) )
_a : Optional[Any] =[]
_a : List[Any] =[]
for word, label in zip(example.words , example.labels ):
_a : Optional[int] =tokenizer.tokenize(SCREAMING_SNAKE_CASE )
            # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(SCREAMING_SNAKE_CASE ) > 0:
tokens.extend(SCREAMING_SNAKE_CASE )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(SCREAMING_SNAKE_CASE ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
_a : Optional[int] =tokenizer.num_special_tokens_to_add()
if len(SCREAMING_SNAKE_CASE ) > max_seq_length - special_tokens_count:
_a : List[Any] =tokens[: (max_seq_length - special_tokens_count)]
_a : Tuple =label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
_a : Dict =[sequence_a_segment_id] * len(SCREAMING_SNAKE_CASE )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
_a : Any =[cls_token] + tokens
_a : Dict =[pad_token_label_id] + label_ids
_a : Union[str, Any] =[cls_token_segment_id] + segment_ids
_a : List[str] =tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
_a : Optional[int] =[1 if mask_padding_with_zero else 0] * len(SCREAMING_SNAKE_CASE )
# Zero-pad up to the sequence length.
_a : Union[str, Any] =max_seq_length - len(SCREAMING_SNAKE_CASE )
if pad_on_left:
_a : Optional[Any] =([pad_token] * padding_length) + input_ids
_a : Optional[int] =([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
_a : Union[str, Any] =([pad_token_segment_id] * padding_length) + segment_ids
_a : Dict =([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
assert len(SCREAMING_SNAKE_CASE ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(SCREAMING_SNAKE_CASE ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
_a : Tuple =None
features.append(
InputFeatures(
input_ids=SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , token_type_ids=SCREAMING_SNAKE_CASE , label_ids=SCREAMING_SNAKE_CASE ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self :Dict , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :int=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> List[str]:
'''simple docstring'''
# Load data features from cache or dataset file
_a : Optional[Any] =os.path.join(
SCREAMING_SNAKE_CASE , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(SCREAMING_SNAKE_CASE ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a : List[str] =cached_features_file + """.lock"""
with FileLock(SCREAMING_SNAKE_CASE ):
if os.path.exists(SCREAMING_SNAKE_CASE ) and not overwrite_cache:
logger.info(f"Loading features from cached file {cached_features_file}" )
_a : Any =torch.load(SCREAMING_SNAKE_CASE )
else:
logger.info(f"Creating features from dataset file at {data_dir}" )
_a : Any =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[str] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"Saving features into cached file {cached_features_file}" )
torch.save(self.features , SCREAMING_SNAKE_CASE )
def __len__( self :Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :Dict , SCREAMING_SNAKE_CASE :int ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
if is_tf_available():
import tensorflow as tf
class A__ :
__UpperCamelCase : List[InputFeatures]
__UpperCamelCase : int = -100
def __init__( self :str , SCREAMING_SNAKE_CASE :TokenClassificationTask , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :PreTrainedTokenizer , SCREAMING_SNAKE_CASE :List[str] , SCREAMING_SNAKE_CASE :str , SCREAMING_SNAKE_CASE :Optional[int] = None , SCREAMING_SNAKE_CASE :str=False , SCREAMING_SNAKE_CASE :Split = Split.train , ) -> Any:
'''simple docstring'''
_a : Tuple =token_classification_task.read_examples_from_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# TODO clean up all this to leverage built-in features of tokenizers
_a : List[Any] =token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
_a : Union[str, Any] =tf.data.Dataset.from_generator(
SCREAMING_SNAKE_CASE , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def __UpperCAmelCase ( self :Tuple ) -> Any:
'''simple docstring'''
_a : List[Any] =self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self :str ) -> Optional[int]:
'''simple docstring'''
return len(self.features )
def __getitem__( self :int , SCREAMING_SNAKE_CASE :str ) -> InputFeatures:
'''simple docstring'''
return self.features[i]
| 276 | 0 |
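# A worked illustration of the sub-token label convention implemented above: only
# the first sub-token of each word keeps the real label id, while continuation
# pieces receive pad_token_label_id (-100, which nn.CrossEntropyLoss ignores).
# The label id 7 is a hypothetical value for illustration:
pad_token_label_id = -100
tokens = ["jack", "##son", "##ville"]  # one word split into three sub-tokens
label_ids = [7] + [pad_token_label_id] * (len(tokens) - 1)
assert label_ids == [7, -100, -100]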
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[str] = (UnCLIPScheduler,)
def __lowerCAmelCase ( self , **__A ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = {
"""num_train_timesteps""": 1000,
"""variance_type""": """fixed_small_log""",
"""clip_sample""": True,
"""clip_sample_range""": 1.0,
"""prediction_type""": """epsilon""",
}
config.update(**__A )
return config
def __lowerCAmelCase ( self ) -> Dict:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def __lowerCAmelCase ( self ) -> str:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def __lowerCAmelCase ( self ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def __lowerCAmelCase ( self ) -> int:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def __lowerCAmelCase ( self ) -> Any:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[int] = self.scheduler_classes[0]
lowerCAmelCase_ :List[str] = self.get_scheduler_config(variance_type="""fixed_small_log""" )
lowerCAmelCase_ :Optional[Any] = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_5_4_9_6_2_5 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_9_9_4_9_8_7 ) ) < 1E-5
def __lowerCAmelCase ( self ) -> Any:
lowerCAmelCase_ :Dict = self.scheduler_classes[0]
lowerCAmelCase_ :List[Any] = self.get_scheduler_config(variance_type="""learned_range""" )
lowerCAmelCase_ :Union[str, Any] = scheduler_class(**__A )
lowerCAmelCase_ :Union[str, Any] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__A ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.7_9_9_8_0_5_2 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.0_0_1_0_0_1_1 < 1E-5
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Any = self.scheduler_classes[0]
lowerCAmelCase_ :int = self.get_scheduler_config()
lowerCAmelCase_ :int = scheduler_class(**__A )
lowerCAmelCase_ :Tuple = scheduler.timesteps
lowerCAmelCase_ :List[str] = self.dummy_model()
lowerCAmelCase_ :Optional[Any] = self.dummy_sample_deter
lowerCAmelCase_ :str = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCAmelCase_ :List[str] = model(__A , __A )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ :Optional[int] = scheduler.step(__A , __A , __A , generator=__A ).prev_sample
lowerCAmelCase_ :int = pred_prev_sample
lowerCAmelCase_ :Union[str, Any] = torch.sum(torch.abs(__A ) )
lowerCAmelCase_ :Union[str, Any] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.3_2_8_4_7_4_3 ) < 1E-3
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :str = self.scheduler_classes[0]
lowerCAmelCase_ :Union[str, Any] = self.get_scheduler_config()
lowerCAmelCase_ :Dict = scheduler_class(**__A )
scheduler.set_timesteps(25 )
lowerCAmelCase_ :Optional[Any] = scheduler.timesteps
lowerCAmelCase_ :Any = self.dummy_model()
lowerCAmelCase_ :Tuple = self.dummy_sample_deter
lowerCAmelCase_ :Tuple = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCAmelCase_ :Optional[int] = model(__A , __A )
if i + 1 == timesteps.shape[0]:
lowerCAmelCase_ :int = None
else:
lowerCAmelCase_ :str = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCAmelCase_ :Optional[Any] = scheduler.step(
__A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample
lowerCAmelCase_ :List[str] = pred_prev_sample
lowerCAmelCase_ :Tuple = torch.sum(torch.abs(__A ) )
lowerCAmelCase_ :Dict = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_6_2_0_3_8 ) < 1E-3
def __lowerCAmelCase ( self ) -> Tuple:
pass
def __lowerCAmelCase ( self ) -> Union[str, Any]:
pass
| 367 |
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :str = "detr"
UpperCAmelCase_ :str = ["past_key_values"]
UpperCAmelCase_ :Tuple = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , __A=True , __A=None , __A=3 , __A=100 , __A=6 , __A=2048 , __A=8 , __A=6 , __A=2048 , __A=8 , __A=0.0 , __A=0.0 , __A=True , __A="relu" , __A=256 , __A=0.1 , __A=0.0 , __A=0.0 , __A=0.0_2 , __A=1.0 , __A=False , __A="sine" , __A="resnet50" , __A=True , __A=False , __A=1 , __A=5 , __A=2 , __A=1 , __A=1 , __A=5 , __A=2 , __A=0.1 , **__A , ) -> List[Any]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
lowerCAmelCase_ :int = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__A , __A ):
lowerCAmelCase_ :str = backbone_config.get("""model_type""" )
lowerCAmelCase_ :List[Any] = CONFIG_MAPPING[backbone_model_type]
lowerCAmelCase_ :Optional[Any] = config_class.from_dict(__A )
# set timm attributes to None
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ :Optional[Any] = None, None, None
lowerCAmelCase_ :Tuple = use_timm_backbone
lowerCAmelCase_ :Optional[int] = backbone_config
lowerCAmelCase_ :Optional[int] = num_channels
lowerCAmelCase_ :int = num_queries
lowerCAmelCase_ :List[Any] = d_model
lowerCAmelCase_ :Optional[int] = encoder_ffn_dim
lowerCAmelCase_ :Tuple = encoder_layers
lowerCAmelCase_ :int = encoder_attention_heads
lowerCAmelCase_ :Optional[Any] = decoder_ffn_dim
lowerCAmelCase_ :List[str] = decoder_layers
lowerCAmelCase_ :Dict = decoder_attention_heads
lowerCAmelCase_ :Dict = dropout
lowerCAmelCase_ :Tuple = attention_dropout
lowerCAmelCase_ :Union[str, Any] = activation_dropout
lowerCAmelCase_ :Any = activation_function
lowerCAmelCase_ :List[str] = init_std
lowerCAmelCase_ :Optional[int] = init_xavier_std
lowerCAmelCase_ :int = encoder_layerdrop
lowerCAmelCase_ :Union[str, Any] = decoder_layerdrop
lowerCAmelCase_ :List[str] = encoder_layers
lowerCAmelCase_ :Union[str, Any] = auxiliary_loss
lowerCAmelCase_ :str = position_embedding_type
lowerCAmelCase_ :List[Any] = backbone
lowerCAmelCase_ :str = use_pretrained_backbone
lowerCAmelCase_ :str = dilation
# Hungarian matcher
lowerCAmelCase_ :List[Any] = class_cost
lowerCAmelCase_ :Union[str, Any] = bbox_cost
lowerCAmelCase_ :Tuple = giou_cost
# Loss coefficients
lowerCAmelCase_ :Optional[int] = mask_loss_coefficient
lowerCAmelCase_ :Union[str, Any] = dice_loss_coefficient
lowerCAmelCase_ :Tuple = bbox_loss_coefficient
lowerCAmelCase_ :Tuple = giou_loss_coefficient
lowerCAmelCase_ :Dict = eos_coefficient
super().__init__(is_encoder_decoder=__A , **__A )
@property
def __lowerCAmelCase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowerCAmelCase ( self ) -> int:
return self.d_model
@classmethod
def __lowerCAmelCase ( cls , __A , **__A ) -> Any:
return cls(backbone_config=__A , **__A )
def __lowerCAmelCase ( self ) -> Dict[str, any]:
lowerCAmelCase_ :List[str] = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowerCAmelCase_ :Dict = self.backbone_config.to_dict()
lowerCAmelCase_ :str = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :List[Any] = version.parse("1.11" )
@property
def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def __lowerCAmelCase ( self ) -> float:
return 1E-5
@property
def __lowerCAmelCase ( self ) -> int:
return 12
| 1 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
__SCREAMING_SNAKE_CASE =get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class UpperCamelCase ( lowercase_ , unittest.TestCase ):
lowercase = XLMProphetNetTokenizer
lowercase = False
lowercase = True
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ : List[Any] = XLMProphetNetTokenizer(__UpperCamelCase ,keep_accents=__UpperCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
lowercase_ : Dict = '[PAD]'
lowercase_ : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) ,__UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) ,__UpperCamelCase )
def _UpperCAmelCase ( self ) -> Tuple:
'''simple docstring'''
lowercase_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'[PAD]' )
self.assertEqual(vocab_keys[1] ,'[CLS]' )
self.assertEqual(vocab_keys[-1] ,'j' )
self.assertEqual(len(__UpperCamelCase ) ,1012 )
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size ,1012 )
def _UpperCAmelCase ( self ) -> str:
'''simple docstring'''
lowercase_ : Any = XLMProphetNetTokenizer(__UpperCamelCase ,keep_accents=__UpperCamelCase )
lowercase_ : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(__UpperCamelCase ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
lowercase_ : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
__UpperCamelCase ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
lowercase_ : Any = tokenizer.convert_tokens_to_ids(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
lowercase_ : int = tokenizer.convert_ids_to_tokens(__UpperCamelCase )
self.assertListEqual(
__UpperCamelCase ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] ,)
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ : Tuple = 'Hello World!'
lowercase_ : Union[str, Any] = [3_5389, 6672, 49, 2]
self.assertListEqual(__UpperCamelCase ,self.big_tokenizer.encode(__UpperCamelCase ) )
@slow
def _UpperCAmelCase ( self ) -> List[str]:
'''simple docstring'''
lowercase_ : List[Any] = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,)
| 213 | """simple docstring"""
__SCREAMING_SNAKE_CASE ={}
def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
lowercase_ : Any = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
lowercase_ : Optional[int] = _calculate(days - 1 , __SCREAMING_SNAKE_CASE , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
lowercase_ : Any = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
lowercase_ : Dict = _calculate(days - 1 , __SCREAMING_SNAKE_CASE , 0 )
lowercase_ : str = state_late + state_absent + state_ontime
lowercase_ : Tuple = prizestrings
return prizestrings
def lowercase__( __SCREAMING_SNAKE_CASE : int = 30 ):
return _calculate(__SCREAMING_SNAKE_CASE , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 213 | 1 |
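# Hedged sanity check, assuming the de-obfuscated `_calculate`/`solution` names
# that the bodies above reference: over a 4-day period there are 43 valid prize
# strings out of 3**4 = 81 attendance strings (at most one absence in total and
# never three consecutive late days):
assert solution(4) == 43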
def count_divisors( n: int ):
    '''simple docstring'''
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        n_divisors *= 2
    return n_divisors
def solution():
    '''simple docstring'''
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if count_divisors(t_num ) > 500:
            break
    return t_num
if __name__ == "__main__":
print(solution())
| 297 |
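# Sanity checks for count_divisors above: the divisor count is the product of
# (multiplicity + 1) over the prime factorisation, e.g. 28 = 2**2 * 7 gives
# (2 + 1) * (1 + 1) = 6.
assert count_divisors(28) == 6  # 1, 2, 4, 7, 14, 28
assert count_divisors(12) == 6  # 1, 2, 3, 4, 6, 12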
def perfect( number: int ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("""Program to check whether a number is a Perfect number or not...""")
SCREAMING_SNAKE_CASE__ = int(input("""Enter number: """).strip())
print(f"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 297 | 1 |
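# Quick checks for `perfect` above (the name its __main__ block calls): a
# perfect number equals the sum of its proper divisors.
assert perfect(28)  # 1 + 2 + 4 + 7 + 14 == 28
assert not perfect(27)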
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
'''simple docstring'''
@staticmethod
def _lowerCAmelCase( *__lowerCAmelCase , **__lowerCAmelCase ) -> Optional[int]:
pass
def __UpperCamelCase ( UpperCAmelCase ):
lowercase__ : Optional[Any] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
lowercase__ : int = DepthEstimationPipeline(model=__lowerCAmelCase , image_processor=__lowerCAmelCase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase ) -> Any:
lowercase__ : str = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , __lowerCAmelCase )
import datasets
lowercase__ : Union[str, Any] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
lowercase__ : Any = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , __lowerCAmelCase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def _lowerCAmelCase( self ) -> Dict:
pass
@slow
@require_torch
def _lowerCAmelCase( self ) -> str:
lowercase__ : Any = '''Intel/dpt-large'''
lowercase__ : List[Any] = pipeline('''depth-estimation''' , model=__lowerCAmelCase )
lowercase__ : int = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
lowercase__ : str = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )
@require_torch
def _lowerCAmelCase( self ) -> Tuple:
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
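# Minimal usage sketch for the pipeline exercised above (hedged: the checkpoint
# and output keys follow the slow test, which loads "Intel/dpt-large" and reads
# "depth" / "predicted_depth" from the result). Kept commented out because it
# downloads a large model.
#
# from transformers import pipeline
#
# depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
# result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
# result["depth"].save("depth.png")       # PIL.Image depth map
# print(result["predicted_depth"].shape)  # raw torch.Tensor prediction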
| 198 | '''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCAmelCase( self ) -> Dict:
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 198 | 1 |
"""simple docstring"""
def bead_sort(sequence: list) -> list:
    """Gravity sort: taller "rods" shed beads onto shorter neighbours until sorted."""
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError("Sequence must be list of non-negative integers")
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
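# Bead (gravity) sort is O(n**2) here and only accepts non-negative integers;
# a quick property check against the built-in sort:
import random as _random

_sample = [_random.randint(0, 100) for _ in range(20)]
assert bead_sort(list(_sample)) == sorted(_sample)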
| 354 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : List[str] = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : Union[str, Any] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : List[Any] = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Union[str, Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_UpperCAmelCase : Optional[int] = {"unk_token": "<unk>"}
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A ) )
_UpperCAmelCase : List[str] = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A , A )
def snake_case_ ( self : List[Any] , **A : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : int , **A : Any ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : List[str] , **A : Optional[Any] ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : str ):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Dict = self.get_rust_tokenizer()
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_UpperCAmelCase : Optional[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Optional[int] = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Any = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Any = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : str = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : Any = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : Optional[int] = image_processor(A , return_tensors="np" )
_UpperCAmelCase : Any = processor(images=A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : str ):
_UpperCAmelCase : Tuple = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : List[str] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Optional[int] = "lower newer"
_UpperCAmelCase : Union[str, Any] = processor(text=A )
_UpperCAmelCase : Optional[int] = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : str = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def snake_case_ ( self : int ):
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : List[str] = processor.batch_decode(A )
_UpperCAmelCase : int = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def snake_case_ ( self : Optional[int] ):
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : int = CLIPProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = "lower newer"
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Optional[Any] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
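# Minimal usage sketch for CLIPProcessor (hedged: "openai/clip-vit-base-patch32"
# is the standard public checkpoint, not part of this test file). Kept commented
# out because it downloads model files.
#
# from PIL import Image
# from transformers import CLIPProcessor
#
# processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
# image = Image.open("cat.png")
# inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
# print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']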
| 202 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
__A : int = LEDConfig
__A : Tuple = {}
__A : List[Any] = "gelu"
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=20 , __A=2 , __A=1 , __A=0 , __A=4 , ):
"""simple docstring"""
lowerCamelCase : List[Any] = parent
lowerCamelCase : Optional[int] = batch_size
lowerCamelCase : int = seq_length
lowerCamelCase : Tuple = is_training
lowerCamelCase : Union[str, Any] = use_labels
lowerCamelCase : str = vocab_size
lowerCamelCase : Union[str, Any] = hidden_size
lowerCamelCase : Tuple = num_hidden_layers
lowerCamelCase : int = num_attention_heads
lowerCamelCase : List[Any] = intermediate_size
lowerCamelCase : Optional[int] = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : Optional[int] = max_position_embeddings
lowerCamelCase : Optional[Any] = eos_token_id
lowerCamelCase : List[Any] = pad_token_id
lowerCamelCase : str = bos_token_id
lowerCamelCase : Any = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
lowerCamelCase : int = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
lowerCamelCase : Union[str, Any] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
lowerCamelCase : List[Any] = prepare_led_inputs_dict(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase : Any = tf.concat(
[tf.zeros_like(UpperCAmelCase__ )[:, :-1], tf.ones_like(UpperCAmelCase__ )[:, -1:]] , axis=-1 , )
lowerCamelCase : str = global_attention_mask
return config, inputs_dict
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : List[Any] = TFLEDModel(config=UpperCAmelCase__ ).get_decoder()
lowerCamelCase : int = inputs_dict["input_ids"]
lowerCamelCase : Union[str, Any] = input_ids[:1, :]
lowerCamelCase : int = inputs_dict["attention_mask"][:1, :]
lowerCamelCase : Dict = 1
# first forward pass
lowerCamelCase : List[str] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowerCamelCase , lowerCamelCase : List[Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase : List[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase : str = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase : List[str] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )[0]
lowerCamelCase : Optional[int] = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase : Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCAmelCase__ , UpperCAmelCase__ , rtol=1e-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : str = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
__A : List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
__A : Union[str, Any] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : str = True
__A : Tuple = False
__A : List[str] = False
__A : int = False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = TFLEDModelTester(self )
lowerCamelCase : Dict = ConfigTester(self , config_class=UpperCAmelCase__ )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCAmelCase__ )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase : Optional[Any] = tf.zeros_like(inputs_dict["attention_mask"] )
lowerCamelCase : Optional[int] = 2
lowerCamelCase : List[Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
lowerCamelCase : int = True
lowerCamelCase : List[str] = self.model_tester.seq_length
lowerCamelCase : Optional[Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__A ):
lowerCamelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__A ):
lowerCamelCase : Dict = [t.numpy() for t in outputs.encoder_attentions]
lowerCamelCase : List[str] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(UpperCAmelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
lowerCamelCase : Optional[int] = True
lowerCamelCase : List[str] = False
lowerCamelCase : Dict = False
lowerCamelCase : Optional[Any] = model_class(UpperCAmelCase__ )
lowerCamelCase : str = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
lowerCamelCase : Dict = len(UpperCAmelCase__ )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
if self.is_encoder_decoder:
lowerCamelCase : List[str] = model_class(UpperCAmelCase__ )
lowerCamelCase : Tuple = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_decoder_attentions_output(UpperCAmelCase__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
lowerCamelCase : Optional[int] = True
lowerCamelCase : Optional[int] = model_class(UpperCAmelCase__ )
lowerCamelCase : Dict = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
# Check attention is always last and order is fine
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : Optional[int] = True
lowerCamelCase : Tuple = model_class(UpperCAmelCase__ )
lowerCamelCase : Optional[int] = model(self._prepare_for_class(UpperCAmelCase__ , UpperCAmelCase__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(UpperCAmelCase__ ) )
self.assertEqual(model.config.output_hidden_states , UpperCAmelCase__ )
check_encoder_attentions_output(UpperCAmelCase__ )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)


TOLERANCE = 1e-4
@slow
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
lowerCamelCase : Union[str, Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
lowerCamelCase : Optional[Any] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
lowerCamelCase : List[str] = prepare_led_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase : str = model(**UpperCAmelCase__ )[0]
lowerCamelCase : List[str] = (1, 1024, 768)
self.assertEqual(output.shape , UpperCAmelCase__ )
# change to expected output here
lowerCamelCase : Optional[Any] = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-3 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
lowerCamelCase : Union[str, Any] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
lowerCamelCase : Optional[Any] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
lowerCamelCase : Any = prepare_led_inputs_dict(model.config , UpperCAmelCase__ , UpperCAmelCase__ )
lowerCamelCase : Optional[Any] = model(**UpperCAmelCase__ )[0]
lowerCamelCase : Dict = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , UpperCAmelCase__ )
# change to expected output here
lowerCamelCase : Any = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase__ , atol=1e-3 , rtol=1e-3 )
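# Minimal summarization sketch for the checkpoint exercised above (hedged: the
# generation arguments are illustrative defaults, not taken from this test).
# Kept commented out because it downloads the LED weights.
#
# from transformers import AutoTokenizer, TFLEDForConditionalGeneration
#
# tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
# model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
# inputs = tokenizer("A very long document ...", return_tensors="tf")
# summary_ids = model.generate(inputs.input_ids, max_length=64)
# print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True))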
| 283 |
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Logistic sigmoid; with deriv=True, its derivative given a sigmoid output."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random starting weight (an odd integer in [1, 199], cast to float)
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
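# Sanity checks for the helpers above: sigmoid(0) is exactly 0.5, and the
# derivative expressed through a sigmoid output s is s * (1 - s).
assert sigmoid_function(0) == 0.5
assert sigmoid_function(0.5, deriv=True) == 0.25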
| 54 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar("T")


class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
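# For most application code the standard library already covers this pattern:
# functools.lru_cache memoizes function results with the same least-recently-used
# eviction policy (a sketch, independent of the class above).
from functools import lru_cache as _std_lru_cache


@_std_lru_cache(maxsize=4)
def _square(x: int) -> int:
    return x * x


_square(2)
print(_square.cache_info())  # hits/misses/currsize of the stdlib cache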
| 120 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model) -> int:
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum(np.prod(p.size()) for p in model_parameters)
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the best model as measured by the validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 120 | 1 |
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
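# The 4x4 board is the smallest nontrivial case: exactly two solutions exist,
# mirror images of each other.
_boards: list[list[str]] = []
depth_first_search([], [], [], _boards, 4)
assert len(_boards) == 2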
| 13 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_2tuple(x):
    """Return x unchanged if it is already iterable, else duplicate it into a pair."""
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class a :
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
pass
def __UpperCAmelCase ( self ) -> Any:
pass
def __UpperCAmelCase ( self ) -> List[Any]:
pass
    def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Tuple:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Optional[Any]:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Union[str, Any]:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
_a = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
_a = model(input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ )
_a = after_output[0]
_a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1e-3 )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=None , **__magic_name__ ) -> Any:
_a , _a = self.get_vision_text_model(__magic_name__ , __magic_name__ )
_a = {'vision_model': vision_model, 'text_model': text_model}
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__magic_name__ )
_a = model(
input_ids=__magic_name__ , pixel_values=__magic_name__ , attention_mask=__magic_name__ , output_attentions=__magic_name__ )
_a = output.vision_model_output.attentions
self.assertEqual(len(__magic_name__ ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        image_size = to_2tuple(vision_model.config.image_size)
        patch_size = to_2tuple(vision_model.config.patch_size)
_a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
_a = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
_a = output.text_model_output.attentions
self.assertEqual(len(__magic_name__ ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> int:
pt_model.to(__magic_name__ )
pt_model.eval()
# prepare inputs
_a = inputs_dict
_a = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
_a = pt_model(**__magic_name__ ).to_tuple()
_a = fx_model(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ , from_pt=__magic_name__ )
_a = fx_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__magic_name__ )
_a = VisionTextDualEncoderModel.from_pretrained(__magic_name__ , from_flax=__magic_name__ )
pt_model_loaded.to(__magic_name__ )
pt_model_loaded.eval()
with torch.no_grad():
_a = pt_model_loaded(**__magic_name__ ).to_tuple()
self.assertEqual(len(__magic_name__ ) , len(__magic_name__ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__magic_name__ , pt_output_loaded.numpy() , 4e-2 )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = VisionTextDualEncoderModel(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __magic_name__ )
_a = fx_state
self.check_pt_flax_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = VisionTextDualEncoderConfig.from_vision_text_configs(__magic_name__ , __magic_name__ )
_a = VisionTextDualEncoderModel(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel(__magic_name__ )
_a = load_flax_weights_in_pytorch_model(__magic_name__ , fx_model.params )
self.check_pt_flax_equivalence(__magic_name__ , __magic_name__ , __magic_name__ )
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = self.prepare_config_and_inputs()
self.check_save_load(**__magic_name__ )
def __UpperCAmelCase ( self ) -> Dict:
_a = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__magic_name__ )
@is_pt_flax_cross_test
def __UpperCAmelCase ( self ) -> Union[str, Any]:
_a = self.prepare_config_and_inputs()
_a = config_inputs_dict.pop('vision_config' )
_a = config_inputs_dict.pop('text_config' )
_a = config_inputs_dict
self.check_equivalence_pt_to_flax(__magic_name__ , __magic_name__ , __magic_name__ )
self.check_equivalence_flax_to_pt(__magic_name__ , __magic_name__ , __magic_name__ )
@slow
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a , _a = self.get_pretrained_model_and_inputs()
_a = model_a(**__magic_name__ )
_a = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__magic_name__ )
_a = FlaxVisionTextDualEncoderModel.from_pretrained(__magic_name__ )
_a = model_a(**__magic_name__ )
_a = after_outputs[0]
_a = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__magic_name__ , 1e-5 )
@require_flax
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __UpperCAmelCase ( self ) -> List[str]:
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__magic_name__ , text_from_pt=__magic_name__ , )
_a = 13
_a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_a = random_attention_mask([batch_size, 4] )
_a = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Optional[int]:
_a = FlaxViTModel(__magic_name__ )
_a = FlaxBertModel(__magic_name__ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = FlaxViTModelTester(self )
_a = FlaxBertModelTester(self )
_a = vit_model_tester.prepare_config_and_inputs()
_a = bert_model_tester.prepare_config_and_inputs()
_a , _a = vision_config_and_inputs
_a , _a , _a , _a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class a ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
def __UpperCAmelCase ( self ) -> Any:
_a = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=__magic_name__ , text_from_pt=__magic_name__ , )
_a = 13
_a = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
_a = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
_a = random_attention_mask([batch_size, 4] )
_a = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __UpperCAmelCase ( self , __magic_name__ , __magic_name__ ) -> Union[str, Any]:
_a = FlaxCLIPVisionModel(__magic_name__ )
_a = FlaxBertModel(__magic_name__ )
return vision_model, text_model
def __UpperCAmelCase ( self ) -> Tuple:
_a = FlaxCLIPVisionModelTester(self )
_a = FlaxBertModelTester(self )
_a = clip_model_tester.prepare_config_and_inputs()
_a = bert_model_tester.prepare_config_and_inputs()
_a , _a = vision_config_and_inputs
_a , _a , _a , _a = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class a ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self ) -> Tuple:
_a = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 )
_a = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
_a = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
_a = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=__magic_name__ , padding=__magic_name__ , return_tensors='np' )
_a = model(**__magic_name__ )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
_a = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __magic_name__ , atol=1e-3 ) )
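# Usage sketch for the dual-encoder wrapper tested above (hedged: checkpoint and
# processor names follow the integration test's "clip-italian/clip-italian").
# Kept commented out because it downloads model files.
#
# model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian")
# processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
# inputs = processor(text=["una foto di un gatto"], images=image, return_tensors="np", padding=True)
# outputs = model(**inputs)
# print(outputs.logits_per_image)  # one row per image, one column per text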
| 168 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _snake_case ( lowerCamelCase__ , unittest.TestCase ):
snake_case__ = KandinskyInpaintPipeline
snake_case__ = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
snake_case__ = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
snake_case__ = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
snake_case__ = False
@property
def lowerCamelCase__ ( self : List[Any] ):
return 32
@property
def lowerCamelCase__ ( self : Optional[int] ):
return 32
@property
def lowerCamelCase__ ( self : Optional[int] ):
return self.time_input_dim
@property
def lowerCamelCase__ ( self : Optional[Any] ):
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self : List[str] ):
return 100
@property
def lowerCamelCase__ ( self : List[str] ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
def lowerCamelCase__ ( self : Optional[int] ):
torch.manual_seed(0 )
__lowerCamelCase : List[str] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
__lowerCamelCase : Union[str, Any] = MultilingualCLIP(__snake_case )
__lowerCamelCase : Tuple = text_encoder.eval()
return text_encoder
@property
def lowerCamelCase__ ( self : int ):
torch.manual_seed(0 )
__lowerCamelCase : Tuple = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCamelCase : str = UNetaDConditionModel(**__snake_case )
return model
@property
def lowerCamelCase__ ( self : Union[str, Any] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self : str ):
torch.manual_seed(0 )
__lowerCamelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : int = self.dummy_text_encoder
__lowerCamelCase : Optional[int] = self.dummy_tokenizer
__lowerCamelCase : str = self.dummy_unet
__lowerCamelCase : Union[str, Any] = self.dummy_movq
__lowerCamelCase : Tuple = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=__snake_case , set_alpha_to_one=__snake_case , steps_offset=1 , prediction_type="epsilon" , thresholding=__snake_case , )
__lowerCamelCase : List[Any] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : List[str] = 'cpu'
__lowerCamelCase : List[Any] = self.get_dummy_components()
__lowerCamelCase : Optional[int] = self.pipeline_class(**__snake_case )
__lowerCamelCase : Optional[int] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__lowerCamelCase : Dict = pipe(**self.get_dummy_inputs(__snake_case ) )
__lowerCamelCase : List[Any] = output.images
__lowerCamelCase : Any = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
__lowerCamelCase : int = image[0, -3:, -3:, -1]
__lowerCamelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__lowerCamelCase : Any = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def lowerCamelCase__ ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
__lowerCamelCase : Tuple = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__lowerCamelCase : str = np.ones((768, 768) , dtype=np.floataa )
__lowerCamelCase : str = 0
__lowerCamelCase : Any = 'a hat'
__lowerCamelCase : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
__lowerCamelCase : str = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
__lowerCamelCase : Optional[int] = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
__lowerCamelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
__lowerCamelCase : List[Any] = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__lowerCamelCase : Union[str, Any] = pipeline(
__snake_case , image=__snake_case , mask_image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=100 , height=768 , width=768 , output_type="np" , )
__lowerCamelCase : List[str] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__snake_case , __snake_case ) | 357 | """simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """A matrix is Hermitian iff it equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Rayleigh quotient v* A v / (v* v) for a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot.dot(v), np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))
    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
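# For a Hermitian matrix the Rayleigh quotient is real and bounded by the
# extreme eigenvalues: lambda_min <= R(a, v) <= lambda_max for every v != 0.
# Quick numeric illustration with the second test matrix above:
_eigvals = np.linalg.eigvalsh(np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]]))
assert _eigvals.min() <= 3 <= _eigvals.max()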
if __name__ == "__main__":
import doctest
doctest.testmod()
tests() | 64 | 0 |
import logging
from transformers import PretrainedConfig
logger = logging.getLogger(__name__)

BERTABS_FINETUNED_CONFIG_ARCHIVE_MAP = {
    "bertabs-finetuned-cnndm": "https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json",
}


class BertAbsConfig(PretrainedConfig):
    """Configuration for the BertAbs extractive/abstractive summarizer."""

    model_type = "bertabs"

    def __init__(
        self,
        vocab_size=30522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
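# Instantiation sketch (hedged: the class and attribute names follow the
# original bertabs example; the arguments shown are the defaults above).
#
# config = BertAbsConfig(enc_layers=6, dec_layers=6)
# print(config.model_type)  # "bertabs"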
| 284 |
def lucas_lehmer_test(p: int) -> bool:
    """Primality test for the Mersenne number 2**p - 1 (p must be >= 2)."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
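# Lucas-Lehmer decides primality of Mersenne numbers M_p = 2**p - 1:
# M_7 = 127 is prime (True above), while M_11 = 2047 = 23 * 89 is not (False).
assert lucas_lehmer_test(7) is True
assert lucas_lehmer_test(11) is False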
| 284 | 1 |
def solution(length: int = 50) -> int:
    """Project Euler 114: count ways to tile a row of `length` units with red
    blocks of length >= 3 separated by at least one black square."""
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
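# Project Euler 114's worked example: a row of length 7 admits exactly 17
# arrangements, which the dynamic-programming table above reproduces.
assert solution(7) == 17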
| 193 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    """Wraps a RagRetriever so it can be created lazily inside a Ray actor."""

    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    """A RagRetriever that distributes index lookups over a pool of Ray actors."""

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                """When using Ray for distributed fine-tuning, """
                """you'll need to provide the paths instead, """
                """as the dataset and the index are loaded """
                """separately. More info in examples/rag/use_own_knowledge_dataset.py """
            )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
    def init_retrieval(self):
        logger.info("""initializing retrieval""")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()
    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("""config""", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = """custom"""
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
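# Hypothetical wiring sketch (worker count and model name assumed): the
# retrieval workers are Ray actor handles built from RayRetriever, created
# once before training starts:
#
#   import ray
#   ray.init()
#   workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#   retriever = RagRayDistributedRetriever.from_pretrained(
#       "facebook/rag-token-nq", actor_handles=workers
#   )
#
# Each actor builds its RagRetriever lazily, so the large index is loaded once
# per worker process instead of once per training step.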
| 193 | 1 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity between every image embedding and every text embedding."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
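# Shape illustration (added; tensors are made up): the result is the matrix of
# cosine similarities between every image embedding and every concept embedding.
#
#   image_embeds = torch.randn(2, 768)
#   concept_embeds = torch.randn(17, 768)
#   cosine_distance(image_embeds, concept_embeds).shape  # torch.Size([2, 17])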
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["""CLIPEncoderLayer"""]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"""special_scores""": {}, """special_care""": [], """concept_scores""": {}, """bad_concepts""": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["""special_scores"""][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["""bad_concepts"""]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
| 38 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    """Counts the single-colour tilings of a row with red (length 2), green
    (length 3) and blue (length 4) tiles (Project Euler 116)."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
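# Added sanity check: Project Euler problem 116's worked example -- a row of
# five units admits 7 red, 3 green and 2 blue tilings, twelve in total.
assert solution(5) == 12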
if __name__ == "__main__":
print(F'''{solution() = }''')
| 234 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """openmmlab/upernet-convnext-tiny""",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = """UperNetConfig"""
class UperNetConvModule(nn.Module):
    """A convolutional block that bundles conv/norm/activation layers."""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: Union[int, Tuple[int, int]],
        padding: Union[int, Tuple[int, int], str] = 0,
        bias: bool = False,
        dilation: Union[int, Tuple[int, int]] = 1,
    ) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)
        return output
class UperNetPyramidPoolingBlock(nn.Module):
def __init__( self : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> None:
super().__init__()
SCREAMING_SNAKE_CASE__ = [
nn.AdaptiveAvgPoolad(__lowerCamelCase ),
UperNetConvModule(__lowerCamelCase , __lowerCamelCase , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(__lowerCamelCase ) , __lowerCamelCase )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
SCREAMING_SNAKE_CASE__ = input
for layer in self.layers:
SCREAMING_SNAKE_CASE__ = layer(__lowerCamelCase )
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
def __init__( self : Tuple , __lowerCamelCase : Tuple[int, ...] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : bool ) -> None:
super().__init__()
SCREAMING_SNAKE_CASE__ = pool_scales
SCREAMING_SNAKE_CASE__ = align_corners
SCREAMING_SNAKE_CASE__ = in_channels
SCREAMING_SNAKE_CASE__ = channels
SCREAMING_SNAKE_CASE__ = []
for i, pool_scale in enumerate(__lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = UperNetPyramidPoolingBlock(pool_scale=__lowerCamelCase , in_channels=__lowerCamelCase , channels=__lowerCamelCase )
self.blocks.append(__lowerCamelCase )
self.add_module(str(__lowerCamelCase ) , __lowerCamelCase )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : torch.Tensor ) -> List[torch.Tensor]:
SCREAMING_SNAKE_CASE__ = []
for ppm in self.blocks:
SCREAMING_SNAKE_CASE__ = ppm(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(
__lowerCamelCase , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(__lowerCamelCase )
return ppm_outs
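# Added shape note (example sizes assumed): with pool_scales (1, 2, 3, 6) and
# an input of shape (N, C, H, W), each block pools to (N, C, s, s), projects to
# `channels` with a 1x1 conv, and is upsampled back to (N, channels, H, W), so
# the outputs can be concatenated channel-wise with the original feature map.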
class UperNetHead(nn.Module):
def __init__( self : str , __lowerCamelCase : int , __lowerCamelCase : Any ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = config.pool_scales # e.g. (1, 2, 3, 6)
SCREAMING_SNAKE_CASE__ = in_channels
SCREAMING_SNAKE_CASE__ = config.hidden_size
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
SCREAMING_SNAKE_CASE__ = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
SCREAMING_SNAKE_CASE__ = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
SCREAMING_SNAKE_CASE__ = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
SCREAMING_SNAKE_CASE__ = UperNetConvModule(__lowerCamelCase , self.channels , kernel_size=1 )
SCREAMING_SNAKE_CASE__ = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(__lowerCamelCase )
self.fpn_convs.append(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowercase_ ( self : Tuple ) -> List[Any]:
self.apply(self._init_weights )
def lowercase_ ( self : Tuple , __lowerCamelCase : Dict ) -> str:
if isinstance(__lowerCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = inputs[-1]
SCREAMING_SNAKE_CASE__ = [x]
psp_outs.extend(self.psp_modules(__lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = torch.cat(__lowerCamelCase , dim=1 )
SCREAMING_SNAKE_CASE__ = self.bottleneck(__lowerCamelCase )
return output
def lowercase_ ( self : str , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
# build laterals
SCREAMING_SNAKE_CASE__ = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(__lowerCamelCase ) )
# build top-down path
SCREAMING_SNAKE_CASE__ = len(__lowerCamelCase )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__ = laterals[i - 1].shape[2:]
SCREAMING_SNAKE_CASE__ = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=__lowerCamelCase , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
SCREAMING_SNAKE_CASE__ = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
SCREAMING_SNAKE_CASE__ = torch.cat(__lowerCamelCase , dim=1 )
SCREAMING_SNAKE_CASE__ = self.fpn_bottleneck(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.classifier(__lowerCamelCase )
return output
class UperNetFCNHead(nn.Module):
def __init__( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : int = 2 , __lowerCamelCase : int = 3 , __lowerCamelCase : Union[int, Tuple[int, int]] = 1 ) -> None:
super().__init__()
SCREAMING_SNAKE_CASE__ = config
SCREAMING_SNAKE_CASE__ = config.auxiliary_in_channels
SCREAMING_SNAKE_CASE__ = config.auxiliary_channels
SCREAMING_SNAKE_CASE__ = config.auxiliary_num_convs
SCREAMING_SNAKE_CASE__ = config.auxiliary_concat_input
SCREAMING_SNAKE_CASE__ = in_index
SCREAMING_SNAKE_CASE__ = (kernel_size // 2) * dilation
SCREAMING_SNAKE_CASE__ = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=__lowerCamelCase , padding=__lowerCamelCase , dilation=__lowerCamelCase ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=__lowerCamelCase , padding=__lowerCamelCase , dilation=__lowerCamelCase ) )
if self.num_convs == 0:
SCREAMING_SNAKE_CASE__ = nn.Identity()
else:
SCREAMING_SNAKE_CASE__ = nn.Sequential(*__lowerCamelCase )
if self.concat_input:
SCREAMING_SNAKE_CASE__ = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=__lowerCamelCase , padding=kernel_size // 2 )
SCREAMING_SNAKE_CASE__ = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def lowercase_ ( self : Tuple ) -> Tuple:
self.apply(self._init_weights )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : List[Any] ) -> List[str]:
if isinstance(__lowerCamelCase , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self : Tuple , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
# just take the relevant feature maps
SCREAMING_SNAKE_CASE__ = encoder_hidden_states[self.in_index]
SCREAMING_SNAKE_CASE__ = self.convs(__lowerCamelCase )
if self.concat_input:
SCREAMING_SNAKE_CASE__ = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
SCREAMING_SNAKE_CASE__ = self.classifier(__lowerCamelCase )
return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] ) -> Optional[int]:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def lowercase_ ( self : str ) -> Tuple:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def lowercase_ ( self : Any , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int]=False ) -> str:
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = value
UPERNET_START_DOCSTRING = R"""
Parameters:
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UPERNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
[`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
`attentions` under returned tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
returned tensors for more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
def __init__( self : Optional[int] , __lowerCamelCase : Union[str, Any] ) -> Dict:
super().__init__(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
SCREAMING_SNAKE_CASE__ = UperNetHead(__lowerCamelCase , in_channels=self.backbone.channels )
SCREAMING_SNAKE_CASE__ = UperNetFCNHead(__lowerCamelCase ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=__lowerCamelCase , config_class=_CONFIG_FOR_DOC )
def lowercase_ ( self : Union[str, Any] , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[torch.Tensor] = None , __lowerCamelCase : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
SCREAMING_SNAKE_CASE__ = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ = output_attentions if output_attentions is not None else self.config.output_attentions
SCREAMING_SNAKE_CASE__ = self.backbone.forward_with_filtered_kwargs(
__lowerCamelCase , output_hidden_states=__lowerCamelCase , output_attentions=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = outputs.feature_maps
SCREAMING_SNAKE_CASE__ = self.decode_head(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(__lowerCamelCase , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = None
if self.auxiliary_head is not None:
SCREAMING_SNAKE_CASE__ = self.auxiliary_head(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = nn.functional.interpolate(
__lowerCamelCase , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
SCREAMING_SNAKE_CASE__ = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
SCREAMING_SNAKE_CASE__ = loss_fct(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = loss_fct(__lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[1:]
else:
SCREAMING_SNAKE_CASE__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=__lowerCamelCase , logits=__lowerCamelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 351 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    """Feature extractor that turns raw audio into Whisper's log-mel input features."""

    model_input_names = ["input_features"]
    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm='''slaney''',
            mel_scale='''slaney''',
        )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Computes the log-mel spectrogram of the provided audio."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, '''hann'''),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel='''log10''',
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
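    # Added note on the normalization above: values are clamped to within 8.0 of
    # the maximum in log10 space, then shifted by +4 and divided by 4 so the
    # features land roughly in [-1, 1], matching what Whisper was trained on.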
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def lowercase_ ( __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : List[np.ndarray] , __lowerCamelCase : float = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
SCREAMING_SNAKE_CASE__ = np.array(__lowerCamelCase , np.intaa )
SCREAMING_SNAKE_CASE__ = []
for vector, length in zip(__lowerCamelCase , attention_mask.sum(-1 ) ):
SCREAMING_SNAKE_CASE__ = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
SCREAMING_SNAKE_CASE__ = padding_value
normed_input_values.append(__lowerCamelCase )
else:
SCREAMING_SNAKE_CASE__ = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self : List[str] , __lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[Union[str, TensorType]] = None , __lowerCamelCase : Optional[bool] = None , __lowerCamelCase : Optional[str] = "max_length" , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[int] = None , __lowerCamelCase : Optional[bool] = None , **__lowerCamelCase : List[str] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
SCREAMING_SNAKE_CASE__ = isinstance(__lowerCamelCase , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
SCREAMING_SNAKE_CASE__ = is_batched_numpy or (
isinstance(__lowerCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__lowerCamelCase , np.ndarray ):
SCREAMING_SNAKE_CASE__ = np.asarray(__lowerCamelCase , dtype=np.floataa )
elif isinstance(__lowerCamelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE__ = [np.asarray([raw_speech] ).T]
SCREAMING_SNAKE_CASE__ = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
SCREAMING_SNAKE_CASE__ = self.pad(
__lowerCamelCase , padding=__lowerCamelCase , max_length=max_length if max_length else self.n_samples , truncation=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
SCREAMING_SNAKE_CASE__ = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
SCREAMING_SNAKE_CASE__ = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
SCREAMING_SNAKE_CASE__ = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
SCREAMING_SNAKE_CASE__ = [self._np_extract_fbank_features(__lowerCamelCase ) for waveform in input_features[0]]
if isinstance(input_features[0] , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = [np.asarray(__lowerCamelCase , dtype=np.floataa ) for feature in input_features]
else:
SCREAMING_SNAKE_CASE__ = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
SCREAMING_SNAKE_CASE__ = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
SCREAMING_SNAKE_CASE__ = padded_inputs.convert_to_tensors(__lowerCamelCase )
return padded_inputs
def lowercase_ ( self : str ) -> Dict[str, Any]:
SCREAMING_SNAKE_CASE__ = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 218 | 0 |
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering`."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {'image': image, 'question': question}
        else:
            # Also supports {"image": ..., "question": ...} dicts, lists of
            # such dicts, generators and datasets.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results
    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs['image'])
        model_inputs = self.tokenizer(
            inputs['question'], return_tensors=self.framework, padding=padding, truncation=truncation)
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs
    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
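# Hypothetical usage sketch (model name and image path assumed, not taken
# from this file):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="cats.png", question="How many cats are there?", top_k=2)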
| 29 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    '''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_megatron_bert'''] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
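# Added note: the _LazyModule indirection defers the heavy torch import until a
# symbol from `modeling_megatron_bert` is actually accessed, keeping
# `import transformers` fast for users who only need configs or tokenizers.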
| 329 | 0 |
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Maps each lowercase letter to its position in the alphabet (a = 1)."""
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of encode: maps alphabet positions back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
main() | 196 |
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Reorders the four 8-char words of a 32-char string into little-endian order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
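# Example (added): the four 8-char words come back in reverse order, e.g.
# to_little_endian(b"abcdefghijklmnopqrstuvwxyz012345")
#     == b"yz012345qrstuvwxijklmnopabcdefgh"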
def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to little-endian hex bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Converts the message to a 01-bit string and appends MD5 padding and length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-char blocks of 16 little-endian 32-bit words."""
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Returns the bitwise NOT of the given 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Adds two numbers modulo 2**32."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 digest of the message as little-endian hex bytes."""
    # Convert the message to a bit string with MD5 padding and length footer
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476

    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]

    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        aa = sum_32(aa, a)
        ba = sum_32(ba, b)
        ca = sum_32(ca, c)
        da = sum_32(da, d)

    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
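# Added cross-check sketch (hashlib is used only for comparison and is not
# required by the implementation above):
#
#   import hashlib
#   message = b"The quick brown fox jumps over the lazy dog"
#   assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")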
if __name__ == "__main__":
import doctest
doctest.testmod() | 196 | 1 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    """PyTorch dataset for HANS."""

    features: List[InputFeatures]
def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Dict=False , lowerCAmelCase__ : bool = False , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = hans_processors[task]()
_UpperCAmelCase : Any = os.path.join(
__a , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(__a ) , __a , ) , )
_UpperCAmelCase : Any = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase : List[Any] = label_list[2], label_list[1]
_UpperCAmelCase : Dict = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_UpperCAmelCase : str = cached_features_file + ".lock"
with FileLock(__a ):
if os.path.exists(__a ) and not overwrite_cache:
logger.info(F"""Loading features from cached file {cached_features_file}""" )
_UpperCAmelCase : Optional[int] = torch.load(__a )
else:
logger.info(F"""Creating features from dataset file at {data_dir}""" )
_UpperCAmelCase : Dict = (
processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
)
logger.info("Training examples: %s" , len(__a ) )
_UpperCAmelCase : List[str] = hans_convert_examples_to_features(__a , __a , __a , __a )
logger.info("Saving features into cached file %s" , __a )
torch.save(self.features , __a )
def __len__( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.features )
def __getitem__( self : Any , lowerCAmelCase__ : Optional[Any] ) -> List[str]:
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """TensorFlow dataset for HANS."""

    features: List[InputFeatures]
def __init__( self : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : PreTrainedTokenizer , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] = 1_2_8 , lowerCAmelCase__ : Any=False , lowerCAmelCase__ : bool = False , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = hans_processors[task]()
_UpperCAmelCase : Optional[Any] = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_UpperCAmelCase , _UpperCAmelCase : List[str] = label_list[2], label_list[1]
_UpperCAmelCase : List[str] = label_list
_UpperCAmelCase : Tuple = processor.get_dev_examples(__a ) if evaluate else processor.get_train_examples(__a )
_UpperCAmelCase : Any = hans_convert_examples_to_features(__a , __a , __a , __a )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(__a )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
_UpperCAmelCase : Tuple = tf.data.Dataset.from_generator(
__a , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
return self.dataset
def __len__( self : Any ) -> List[str]:
"""simple docstring"""
return len(self.features )
def __getitem__( self : int , lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
return self.features[i]
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class. The HANS evaluation collapses `contradiction` and
        `neutral` into `non-entailment`, while `entailment` stays its own label."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"""guid: {example}""")
        logger.info(f"""features: {features[i]}""")

    return features
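# Hypothetical end-to-end sketch (paths and model name assumed; AutoTokenizer
# is not imported in this file):
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   examples = HansProcessor().get_dev_examples("./hans")
#   features = hans_convert_examples_to_features(
#       examples, ["contradiction", "entailment", "neutral"], 128, tokenizer
#   )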
hans_tasks_num_labels = {
    'hans': 3,
}

hans_processors = {
    'hans': HansProcessor,
} | 145 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"""got {self.rope_scaling}""")
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"""`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}""")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"""`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}""")
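# Hypothetical usage sketch (factor value assumed): linear RoPE scaling to
# stretch the context window at inference time.
#
#   config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})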
| 1 | 0 |
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Returns an activation module for the given name."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F"Unsupported activation function: {act_fn}")
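# Added usage note (assumes `import torch`): each call returns a fresh module.
#
#   act = get_activation("silu")
#   y = act(torch.randn(4))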
| 22 |
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Renames PT weight names to Flax names and reshapes the tensor if needed."""

    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.")

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
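# Hypothetical usage sketch (model objects assumed):
#
#   pt_state_dict = pt_model.state_dict()
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
#
# Kernel layouts differ between the two frameworks, which is why 4-D conv
# weights are transposed to (H, W, in, out) and linear weights to (in, out).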
| 22 | 1 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class a__( unittest.TestCase ):
def lowercase_ ( self : int ):
a : List[str] = tempfile.mkdtemp()
a : List[str] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
a : List[Any] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
a : Optional[int] = os.path.join(self.tmpdirname , __snake_case )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(__snake_case , __snake_case )
def lowercase_ ( self : str , **__snake_case : Dict ):
return BertTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : Dict , **__snake_case : str ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : Dict , **__snake_case : Union[str, Any] ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : int ):
a : Union[str, Any] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
a : Dict = [Image.fromarray(np.moveaxis(__snake_case , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Optional[int] ):
a : int = self.get_tokenizer()
a : Optional[Any] = self.get_rust_tokenizer()
a : Optional[Any] = self.get_image_processor()
a : Any = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_slow.save_pretrained(self.tmpdirname )
a : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__snake_case )
a : List[Any] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case )
processor_fast.save_pretrained(self.tmpdirname )
a : Tuple = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __snake_case )
self.assertIsInstance(processor_fast.tokenizer , __snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __snake_case )
self.assertIsInstance(processor_fast.image_processor , __snake_case )
def lowercase_ ( self : Union[str, Any] ):
a : int = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a : Dict = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
a : str = self.get_image_processor(do_normalize=__snake_case , padding_value=1.0 )
a : Union[str, Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=__snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : int = self.get_image_processor()
a : str = self.get_tokenizer()
a : List[str] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case )
a : List[Any] = self.prepare_image_inputs()
a : str = image_processor(__snake_case , return_tensors='np' )
a : Union[str, Any] = processor(images=__snake_case , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self : Tuple ):
a : Union[str, Any] = self.get_image_processor()
a : Tuple = self.get_tokenizer()
a : Union[str, Any] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case )
a : Optional[Any] = 'lower newer'
a : List[Any] = processor(text=__snake_case )
a : Union[str, Any] = tokenizer(__snake_case , padding='max_length' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self : Union[str, Any] ):
a : Optional[int] = self.get_image_processor()
a : Any = self.get_tokenizer()
a : List[str] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case )
a : Union[str, Any] = 'lower newer'
a : List[Any] = self.prepare_image_inputs()
a : str = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(__snake_case ):
processor()
def lowercase_ ( self : int ):
a : Union[str, Any] = self.get_image_processor()
a : Any = self.get_tokenizer()
a : Tuple = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case )
a : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a : Dict = processor.batch_decode(__snake_case )
a : Tuple = tokenizer.batch_decode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
def lowercase_ ( self : Dict ):
a : Any = self.get_image_processor()
a : Any = self.get_tokenizer()
a : Optional[Any] = AlignProcessor(tokenizer=__snake_case , image_processor=__snake_case )
a : List[str] = 'lower newer'
a : Any = self.prepare_image_inputs()
a : Dict = processor(text=__snake_case , images=__snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 297 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase: List[Any] = logging.get_logger(__name__)
lowerCAmelCase: List[Any] = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class a__( lowerCamelCase__ ):
lowercase__ = """roberta"""
def __init__( self : Tuple , __snake_case : List[str]=5_02_65 , __snake_case : int=7_68 , __snake_case : Union[str, Any]=12 , __snake_case : Dict=12 , __snake_case : Tuple=30_72 , __snake_case : Optional[Any]="gelu" , __snake_case : str=0.1 , __snake_case : Any=0.1 , __snake_case : str=5_12 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : int=1e-1_2 , __snake_case : str=1 , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=2 , __snake_case : Optional[int]="absolute" , __snake_case : Union[str, Any]=True , __snake_case : Union[str, Any]=None , **__snake_case : str , ):
super().__init__(pad_token_id=__snake_case , bos_token_id=__snake_case , eos_token_id=__snake_case , **__snake_case )
a : List[str] = vocab_size
a : str = hidden_size
a : Tuple = num_hidden_layers
a : Dict = num_attention_heads
a : List[Any] = hidden_act
a : str = intermediate_size
a : Union[str, Any] = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Optional[int] = type_vocab_size
a : str = initializer_range
a : List[Any] = layer_norm_eps
a : Optional[int] = position_embedding_type
a : Dict = use_cache
a : Any = classifier_dropout
class a__( lowerCamelCase__ ):
@property
def lowercase_ ( self : int ):
if self.task == "multiple-choice":
a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a : str = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 297 | 1 |
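# A quick sketch of what the ONNX config property above returns for the default
# (non multiple-choice) task:
#
#   OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#                ('attention_mask', {0: 'batch', 1: 'sequence'})])
#
# The axis names tell the ONNX exporter which tensor dimensions are dynamic.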
'''simple docstring'''
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_a : List[Any] = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
_a : Tuple = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _lowerCAmelCase ( lowercase , lowercase=False ) -> Dict:
__lowerCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , lowercase , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=lowercase , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _lowerCAmelCase ( lowercase ) -> Union[str, Any]:
__lowerCAmelCase = {}
__lowerCAmelCase = r'''.*sequential.(\d+).*'''
__lowerCAmelCase = r'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__lowerCAmelCase = key.replace(lowercase , lowercase )
if re.match(lowercase , lowercase ):
# replace sequential layers with list
__lowerCAmelCase = re.match(lowercase , lowercase ).group(1 )
__lowerCAmelCase = key.replace(f'sequential.{sequential_layer}.' , f'layers.{int(lowercase )//3}.linear.' )
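            # e.g. "...sequential.3.weight" -> "...layers.1.linear.weight" (3 // 3 == 1)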
elif re.match(lowercase , lowercase ):
__lowerCAmelCase = int(re.match(lowercase , lowercase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
            __lowerCAmelCase = 1 if projection_layer == 0 else 2
            __lowerCAmelCase = key.replace(f'_projection.{projection_layer}.' , f'_projection.linear{transformers_projection_layer}.' )
        if "audio" in key and "qkv" in key:
# split qkv into query key and value
__lowerCAmelCase = value
__lowerCAmelCase = mixed_qkv.size(0 ) // 3
__lowerCAmelCase = mixed_qkv[:qkv_dim]
__lowerCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
__lowerCAmelCase = mixed_qkv[qkv_dim * 2 :]
__lowerCAmelCase = query_layer
__lowerCAmelCase = key_layer
__lowerCAmelCase = value_layer
else:
__lowerCAmelCase = value
return model_state_dict
def _lowerCAmelCase ( lowercase , lowercase , lowercase , lowercase=False ) -> Union[str, Any]:
__lowerCAmelCase = init_clap(lowercase , enable_fusion=lowercase )
clap_model.eval()
__lowerCAmelCase = clap_model.state_dict()
__lowerCAmelCase = rename_state_dict(lowercase )
__lowerCAmelCase = ClapConfig()
__lowerCAmelCase = enable_fusion
__lowerCAmelCase = ClapModel(lowercase )
# ignore the spectrogram embedding layer
model.load_state_dict(lowercase , strict=lowercase )
model.save_pretrained(lowercase )
transformers_config.save_pretrained(lowercase )
if __name__ == "__main__":
_a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_a : str = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 360 |
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def _lowerCAmelCase ( lowercase , lowercase , lowercase ) -> List[str]:
# Initialise PyTorch model
__lowerCAmelCase = BertConfig.from_json_file(lowercase )
print(f'Building PyTorch model from configuration: {config}' )
__lowerCAmelCase = BertForPreTraining(lowercase )
# Load weights from tf checkpoint
load_tf_weights_in_bert(lowercase , lowercase , lowercase )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , lowercase )
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_a : Optional[int] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
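# Example invocation (script name and paths are illustrative):
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin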
| 46 | 0 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : Tuple = {
'microsoft/unispeech-sat-base-100h-libri-ft': (
'https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json'
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = """unispeech-sat"""
def __init__( self : Optional[int] , UpperCamelCase__ : List[str]=32 , UpperCamelCase__ : List[str]=768 , UpperCamelCase__ : List[Any]=12 , UpperCamelCase__ : List[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : int=1E-5 , UpperCamelCase__ : Union[str, Any]="group" , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : Optional[Any]=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : List[str]=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Dict=128 , UpperCamelCase__ : Optional[int]=16 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Tuple=0.05 , UpperCamelCase__ : List[str]=10 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=0.0 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Dict=320 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Optional[Any]=100 , UpperCamelCase__ : Union[str, Any]=256 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Union[str, Any]="mean" , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Optional[Any]=256 , UpperCamelCase__ : Union[str, Any]=(512, 512, 512, 512, 1500) , UpperCamelCase__ : Any=(5, 3, 3, 1, 1) , UpperCamelCase__ : Union[str, Any]=(1, 2, 3, 1, 1) , UpperCamelCase__ : List[str]=512 , UpperCamelCase__ : int=0 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : int=2 , UpperCamelCase__ : List[Any]=504 , **UpperCamelCase__ : List[str] , ) -> str:
"""simple docstring"""
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
__magic_name__ = hidden_size
__magic_name__ = feat_extract_norm
__magic_name__ = feat_extract_activation
__magic_name__ = list(UpperCamelCase__ )
__magic_name__ = list(UpperCamelCase__ )
__magic_name__ = list(UpperCamelCase__ )
__magic_name__ = conv_bias
__magic_name__ = num_conv_pos_embeddings
__magic_name__ = num_conv_pos_embedding_groups
__magic_name__ = len(self.conv_dim )
__magic_name__ = num_hidden_layers
__magic_name__ = intermediate_size
__magic_name__ = hidden_act
__magic_name__ = num_attention_heads
__magic_name__ = hidden_dropout
__magic_name__ = attention_dropout
__magic_name__ = activation_dropout
__magic_name__ = feat_proj_dropout
__magic_name__ = final_dropout
__magic_name__ = layerdrop
__magic_name__ = layer_norm_eps
__magic_name__ = initializer_range
__magic_name__ = vocab_size
__magic_name__ = num_clusters
__magic_name__ = do_stable_layer_norm
__magic_name__ = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ = apply_spec_augment
__magic_name__ = mask_time_prob
__magic_name__ = mask_time_length
__magic_name__ = mask_time_min_masks
__magic_name__ = mask_feature_prob
__magic_name__ = mask_feature_length
__magic_name__ = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__magic_name__ = num_codevectors_per_group
__magic_name__ = num_codevector_groups
__magic_name__ = contrastive_logits_temperature
__magic_name__ = feat_quantizer_dropout
__magic_name__ = num_negatives
__magic_name__ = codevector_dim
__magic_name__ = proj_codevector_dim
__magic_name__ = diversity_loss_weight
# ctc loss
__magic_name__ = ctc_loss_reduction
__magic_name__ = ctc_zero_infinity
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__magic_name__ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__magic_name__ = list(UpperCamelCase__ )
__magic_name__ = list(UpperCamelCase__ )
__magic_name__ = list(UpperCamelCase__ )
__magic_name__ = xvector_output_dim
@property
def _lowercase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
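# Worked example for the property above (a sketch, assuming the default
# conv_stride): functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320,
# i.e. one encoder frame per 320 input samples (20 ms at 16 kHz).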
| 88 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A : Dict = logging.get_logger(__name__)
def __magic_name__ ( __snake_case : Any ) -> Any:
lowercase : Optional[Any] = DPTConfig()
if "large" in checkpoint_url:
lowercase : Optional[int] = 1024
lowercase : Dict = 4096
lowercase : Union[str, Any] = 24
lowercase : str = 16
lowercase : Dict = [5, 11, 17, 23]
lowercase : Any = [256, 512, 1024, 1024]
lowercase : Optional[Any] = (1, 384, 384)
if "ade" in checkpoint_url:
lowercase : List[Any] = True
lowercase : Union[str, Any] = 150
lowercase : Dict = "huggingface/label-files"
lowercase : Optional[Any] = "ade20k-id2label.json"
lowercase : Optional[int] = json.load(open(cached_download(hf_hub_url(__snake_case , __snake_case , repo_type="dataset" ) ) , "r" ) )
        lowercase : List[Any] = {int(k ): v for k, v in idalabel.items()}
lowercase : Optional[Any] = idalabel
lowercase : Optional[Any] = {v: k for k, v in idalabel.items()}
lowercase : Optional[int] = [1, 150, 480, 480]
return config, expected_shape
def __magic_name__ ( __snake_case : Union[str, Any] ) -> Optional[int]:
lowercase : Optional[Any] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def __magic_name__ ( __snake_case : Union[str, Any] ) -> Optional[Any]:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowercase : Tuple = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
lowercase : Tuple = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
lowercase : Optional[int] = name.replace("patch_embed" , "patch_embeddings" )
if "pos_embed" in name:
lowercase : List[Any] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
lowercase : str = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
lowercase : Any = name.replace("proj" , "projection" )
if "blocks" in name:
lowercase : Tuple = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
lowercase : Optional[int] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowercase : str = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name:
lowercase : Dict = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowercase : Any = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
lowercase : Optional[Any] = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
lowercase : Optional[int] = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
lowercase : Any = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
lowercase : Tuple = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
lowercase : int = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
lowercase : Union[str, Any] = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
lowercase : int = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowercase : Union[str, Any] = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
lowercase : Tuple = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
lowercase : Union[str, Any] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
lowercase : List[Any] = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
lowercase : Optional[int] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
lowercase : Union[str, Any] = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowercase : str = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowercase : Any = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowercase : Dict = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowercase : str = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowercase : Union[str, Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
lowercase : Optional[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
lowercase : Union[str, Any] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
lowercase : Optional[int] = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
lowercase : str = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
lowercase : str = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
lowercase : Any = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
lowercase : List[str] = name.replace("pretrained" , "dpt" )
if "bn" in name:
lowercase : Optional[int] = name.replace("bn" , "batch_norm" )
if "head" in name:
lowercase : Union[str, Any] = name.replace("head" , "head.head" )
if "encoder.norm" in name:
lowercase : List[str] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
lowercase : Optional[Any] = name.replace("auxlayer" , "auxiliary_head.head" )
return name
def __magic_name__ ( __snake_case : str , __snake_case : str ) -> Any:
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase : Union[str, Any] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
lowercase : List[str] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
lowercase : Optional[int] = in_proj_weight[: config.hidden_size, :]
lowercase : int = in_proj_bias[: config.hidden_size]
lowercase : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase : int = in_proj_weight[
-config.hidden_size :, :
]
lowercase : Tuple = in_proj_bias[-config.hidden_size :]
def __magic_name__ ( ) -> int:
lowercase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowercase : str = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def __magic_name__ ( __snake_case : List[Any] , __snake_case : str , __snake_case : Tuple , __snake_case : Tuple ) -> Tuple:
lowercase , lowercase : Tuple = get_dpt_config(__snake_case )
# load original state_dict from URL
lowercase : List[Any] = torch.hub.load_state_dict_from_url(__snake_case , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__snake_case )
# rename keys
for key in state_dict.copy().keys():
lowercase : Any = state_dict.pop(__snake_case )
lowercase : Optional[int] = val
# read in qkv matrices
read_in_q_k_v(__snake_case , __snake_case )
# load HuggingFace model
lowercase : List[str] = DPTForSemanticSegmentation(__snake_case ) if "ade" in checkpoint_url else DPTForDepthEstimation(__snake_case )
model.load_state_dict(__snake_case )
model.eval()
# Check outputs on an image
lowercase : Any = 480 if "ade" in checkpoint_url else 384
lowercase : Optional[Any] = DPTImageProcessor(size=__snake_case )
lowercase : Any = prepare_img()
lowercase : Union[str, Any] = image_processor(__snake_case , return_tensors="pt" )
# forward pass
lowercase : Optional[int] = model(**__snake_case ).logits if "ade" in checkpoint_url else model(**__snake_case ).predicted_depth
# Assert logits
lowercase : Optional[int] = torch.tensor([[6.31_99, 6.36_29, 6.41_48], [6.38_50, 6.36_15, 6.41_66], [6.35_19, 6.31_76, 6.35_75]] )
if "ade" in checkpoint_url:
lowercase : List[str] = torch.tensor([[4.04_80, 4.24_20, 4.43_60], [4.31_24, 4.56_93, 4.82_61], [4.57_68, 4.89_65, 5.21_63]] )
assert outputs.shape == torch.Size(__snake_case )
assert (
torch.allclose(outputs[0, 0, :3, :3] , __snake_case , atol=1E-4 )
if "ade" in checkpoint_url
else torch.allclose(outputs[0, :3, :3] , __snake_case )
)
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
if push_to_hub:
print("Pushing model to hub..." )
model.push_to_hub(
repo_path_or_name=Path(__snake_case , __snake_case ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=__snake_case , )
image_processor.push_to_hub(
repo_path_or_name=Path(__snake_case , __snake_case ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=__snake_case , )
if __name__ == "__main__":
_A : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
_A : Dict = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 202 | 0 |
'''simple docstring'''
def solution (max_base :int = 10 , max_power :int = 22 ) -> int:
    '''simple docstring'''
    bases = range(1 , max_base )
    powers = range(1 , max_power )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
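# Worked example: 16807 == 7**5 has exactly 5 digits, so (base=7, power=5) is
# counted, while 10**n always has n + 1 digits, which is why bases stop at 9.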
| 104 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
a_ : Tuple = logging.get_logger(__name__)
class a ( _SCREAMING_SNAKE_CASE ):
_lowerCAmelCase = ["""input_features"""]
def __init__( self , __magic_name__=80 , __magic_name__=1_60_00 , __magic_name__=1_60 , __magic_name__=30 , __magic_name__=4_00 , __magic_name__=0.0 , __magic_name__=False , **__magic_name__ , ) -> Optional[int]:
super().__init__(
feature_size=__magic_name__ , sampling_rate=__magic_name__ , padding_value=__magic_name__ , return_attention_mask=__magic_name__ , **__magic_name__ , )
_a = n_fft
_a = hop_length
_a = chunk_length
_a = chunk_length * sampling_rate
_a = self.n_samples // hop_length
_a = sampling_rate
_a = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=__magic_name__ , min_frequency=0.0 , max_frequency=8_0_0_0.0 , sampling_rate=__magic_name__ , norm='slaney' , mel_scale='slaney' , )
def __UpperCAmelCase ( self , __magic_name__ ) -> np.ndarray:
_a = spectrogram(
__magic_name__ , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
_a = log_spec[:, :-1]
_a = np.maximum(__magic_name__ , log_spec.max() - 8.0 )
_a = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __UpperCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ = 0.0 ) -> List[np.ndarray]:
if attention_mask is not None:
_a = np.array(__magic_name__ , np.intaa )
_a = []
for vector, length in zip(__magic_name__ , attention_mask.sum(-1 ) ):
_a = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 )
if length < normed_slice.shape[0]:
_a = padding_value
normed_input_values.append(__magic_name__ )
else:
_a = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values]
return normed_input_values
def __call__( self , __magic_name__ , __magic_name__ = True , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = "max_length" , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , **__magic_name__ , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_a = isinstance(__magic_name__ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
_a = is_batched_numpy or (
isinstance(__magic_name__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_a = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(__magic_name__ , np.ndarray ):
_a = np.asarray(__magic_name__ , dtype=np.floataa )
elif isinstance(__magic_name__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_a = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_a = [np.asarray([raw_speech] ).T]
_a = BatchFeature({'input_features': raw_speech} )
# convert into correct format for padding
_a = self.pad(
__magic_name__ , padding=__magic_name__ , max_length=max_length if max_length else self.n_samples , truncation=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
_a = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
_a = np.stack(padded_inputs['input_features'] , axis=0 )
# make sure list is in array format
_a = padded_inputs.get('input_features' ).transpose(2 , 0 , 1 )
_a = [self._np_extract_fbank_features(__magic_name__ ) for waveform in input_features[0]]
if isinstance(input_features[0] , __magic_name__ ):
_a = [np.asarray(__magic_name__ , dtype=np.floataa ) for feature in input_features]
else:
_a = input_features
if return_attention_mask:
            # rescale the attention mask from samples (480_000) to features (3_000)
_a = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
_a = padded_inputs.convert_to_tensors(__magic_name__ )
return padded_inputs
def __UpperCAmelCase ( self ) -> Dict[str, Any]:
_a = copy.deepcopy(self.__dict__ )
_a = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
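# Frame arithmetic for the defaults above: chunk_length=30 s at a 16_000 Hz
# sampling rate gives n_samples = 480_000, and 480_000 // 160 (the hop_length)
# = 3_000 log-mel frames per 30-second window.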
| 104 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__A : Optional[Any] = logging.get_logger(__name__)
__A : Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__A : Union[str, Any] = {
"vocab_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
),
"squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"squeezebert/squeezebert-uncased": (
"https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli": (
"https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
),
"squeezebert/squeezebert-mnli-headless": (
"https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
),
},
}
__A : int = {
"squeezebert/squeezebert-uncased": 512,
"squeezebert/squeezebert-mnli": 512,
"squeezebert/squeezebert-mnli-headless": 512,
}
__A : Union[str, Any] = {
"squeezebert/squeezebert-uncased": {"do_lower_case": True},
"squeezebert/squeezebert-mnli": {"do_lower_case": True},
"squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
"""simple docstring"""
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = SqueezeBertTokenizer
def __init__( self : str , lowerCamelCase : int=None , lowerCamelCase : Optional[int]=None , lowerCamelCase : List[str]=True , lowerCamelCase : List[Any]="[UNK]" , lowerCamelCase : Dict="[SEP]" , lowerCamelCase : str="[PAD]" , lowerCamelCase : Union[str, Any]="[CLS]" , lowerCamelCase : List[str]="[MASK]" , lowerCamelCase : List[Any]=True , lowerCamelCase : List[str]=None , **lowerCamelCase : Tuple , ) -> str:
super().__init__(
lowerCamelCase , tokenizer_file=lowerCamelCase , do_lower_case=lowerCamelCase , unk_token=lowerCamelCase , sep_token=lowerCamelCase , pad_token=lowerCamelCase , cls_token=lowerCamelCase , mask_token=lowerCamelCase , tokenize_chinese_chars=lowerCamelCase , strip_accents=lowerCamelCase , **lowerCamelCase , )
lowerCAmelCase_ : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , lowerCamelCase ) != do_lower_case
or normalizer_state.get("""strip_accents""" , lowerCamelCase ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , lowerCamelCase ) != tokenize_chinese_chars
):
lowerCAmelCase_ : Any = getattr(lowerCamelCase , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ : Union[str, Any] = do_lower_case
lowerCAmelCase_ : str = strip_accents
lowerCAmelCase_ : int = tokenize_chinese_chars
lowerCAmelCase_ : str = normalizer_class(**lowerCamelCase )
lowerCAmelCase_ : List[str] = do_lower_case
def __lowercase ( self : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : List[Any]=None ) -> str:
lowerCAmelCase_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowercase ( self : List[str] , lowerCamelCase : List[int] , lowerCamelCase : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase_ : str = [self.sep_token_id]
lowerCAmelCase_ : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
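    # e.g. for a pair (A, B): "[CLS] A [SEP]" gets token_type_id 0 and "B [SEP]"
    # gets 1, following the standard BERT segment convention.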
def __lowercase ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
lowerCAmelCase_ : Dict = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
| 120 |
'''simple docstring'''
def is_pentagonal ( n : int ):
    '''simple docstring'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution ( limit : int = 50_00 ):
    '''simple docstring'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , limit )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a ) and is_pentagonal(b ):
                return b
    return -1
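# Worked check of is_pentagonal: for n = 22, 24 * 22 + 1 == 529 and
# sqrt(529) == 23, so (1 + 23) / 6 == 4.0, a whole number; indeed
# P(4) = 4 * (3 * 4 - 1) // 2 == 22.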
if __name__ == "__main__":
print(F'''{solution() = }''')
| 120 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : Tuple = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
_lowerCamelCase : Optional[int] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
_lowerCamelCase : int = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
_lowerCamelCase : str = '''▁'''
class lowercase ( a ):
lowercase__ : int = VOCAB_FILES_NAMES
lowercase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any]=True , _UpperCamelCase : Union[str, Any]=True , _UpperCamelCase : Tuple=False , _UpperCamelCase : Optional[Any]="[CLS]" , _UpperCamelCase : Tuple="[SEP]" , _UpperCamelCase : Tuple="<unk>" , _UpperCamelCase : str="[SEP]" , _UpperCamelCase : Union[str, Any]="<pad>" , _UpperCamelCase : Any="[CLS]" , _UpperCamelCase : str="[MASK]" , _UpperCamelCase : Optional[Dict[str, Any]] = None , **_UpperCamelCase : List[str] , ) -> None:
'''simple docstring'''
SCREAMING_SNAKE_CASE = (
AddedToken(_UpperCamelCase , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase , normalized=_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase )
else mask_token
)
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_UpperCamelCase , remove_space=_UpperCamelCase , keep_accents=_UpperCamelCase , bos_token=_UpperCamelCase , eos_token=_UpperCamelCase , unk_token=_UpperCamelCase , sep_token=_UpperCamelCase , pad_token=_UpperCamelCase , cls_token=_UpperCamelCase , mask_token=_UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCamelCase , )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCamelCase )
@property
def __snake_case( self : Dict ) -> Dict:
'''simple docstring'''
return len(self.sp_model )
def __snake_case( self : int ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(_UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self : List[Any] , _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : Dict ) -> Any:
'''simple docstring'''
if self.remove_space:
SCREAMING_SNAKE_CASE = " ".join(inputs.strip().split() )
else:
SCREAMING_SNAKE_CASE = inputs
SCREAMING_SNAKE_CASE = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
SCREAMING_SNAKE_CASE = unicodedata.normalize("NFKD" , _UpperCamelCase )
SCREAMING_SNAKE_CASE = "".join([c for c in outputs if not unicodedata.combining(_UpperCamelCase )] )
if self.do_lower_case:
SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def __snake_case( self : Optional[int] , _UpperCamelCase : str ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = self.preprocess_text(_UpperCamelCase )
SCREAMING_SNAKE_CASE = self.sp_model.encode(_UpperCamelCase , out_type=_UpperCamelCase )
SCREAMING_SNAKE_CASE = []
for piece in pieces:
if len(_UpperCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
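                # e.g. a piece such as "9," is re-encoded so that the digit and
                # the trailing comma land in separate pieces (this appears to
                # mirror the original ALBERT tokenizer's digit handling).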
SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(_UpperCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_UpperCamelCase )
else:
new_pieces.append(_UpperCamelCase )
return new_pieces
def __snake_case( self : Optional[Any] , _UpperCamelCase : Dict ) -> str:
'''simple docstring'''
return self.sp_model.PieceToId(_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : Dict ) -> Tuple:
'''simple docstring'''
return self.sp_model.IdToPiece(_UpperCamelCase )
def __snake_case( self : Optional[int] , _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ""
SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_UpperCamelCase ) + token
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(_UpperCamelCase )
SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(_UpperCamelCase )
return out_string.strip()
def __snake_case( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __snake_case( self : Tuple , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None , _UpperCamelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCamelCase , token_ids_a=_UpperCamelCase , already_has_special_tokens=_UpperCamelCase )
if token_ids_a is not None:
return [1] + ([0] * len(_UpperCamelCase )) + [1] + ([0] * len(_UpperCamelCase )) + [1]
return [1] + ([0] * len(_UpperCamelCase )) + [1]
def __snake_case( self : Optional[Any] , _UpperCamelCase : List[int] , _UpperCamelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case( self : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_UpperCamelCase ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
SCREAMING_SNAKE_CASE = os.path.join(
_UpperCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCamelCase , "wb" ) as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(_UpperCamelCase )
return (out_vocab_file,)
| 206 | from __future__ import annotations
from collections.abc import Iterator
class Node :
    def __init__( self : str , value : int ) -> None:
        '''simple docstring'''
        self.value = value
        self.left = None
        self.right = None
class BinaryTreeNodeSum :
    def __init__( self : str , tree : Node ) -> None:
        '''simple docstring'''
        self.tree = tree
    def depth_first_search( self : int , node : Node | None ) -> int:
        '''simple docstring'''
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )
    def __iter__( self : List[Any] ) -> Iterator[int]:
        '''simple docstring'''
        yield self.depth_first_search(self.tree )
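# Minimal usage sketch (class names as reconstructed above): a root of 10 with
# children 5 and -3 sums to 12.
#
#   tree = Node(10)
#   tree.left = Node(5)
#   tree.right = Node(-3)
#   print(next(iter(BinaryTreeNodeSum(tree))))  # 12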
if __name__ == "__main__":
import doctest
doctest.testmod()
| 206 | 1 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
snake_case : str = HfArgumentParser(InitializationArguments)
snake_case : Tuple = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
snake_case : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
snake_case : str = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
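# Both flags target mixed-precision stability: scale_attn_by_inverse_layer_idx
# scales the attention scores of layer i by 1 / (i + 1), and
# reorder_and_upcast_attn upcasts the attention dot-product/softmax to fp32.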
# Load model config (GPT-2 large in this case)
snake_case : Tuple = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
snake_case : Any = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 94 |
"""simple docstring"""
from __future__ import annotations
def resistor_parallel (resistors : list[float] ):
    """simple docstring"""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = F"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg )
        first_sum += 1 / float(resistor )
        index += 1
    return 1 / first_sum
def resistor_series (resistors : list[float] ):
    """simple docstring"""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = F"Resistor at index {index} has a negative value!"
            raise ValueError(msg )
        index += 1
    return sum_r
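# Worked example: resistor_parallel([2, 4]) == 1 / (1/2 + 1/4) == 4/3 (~1.333),
# while resistor_series([2, 4]) == 6.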
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 | 0 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class a ( nn.Module ):
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 0.0
_lowerCAmelCase = 1
_lowerCAmelCase = 1
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = jnp.floataa
def __UpperCAmelCase ( self ) -> List[Any]:
_a = []
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
_a = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__magic_name__ )
_a = resnets
_a = attentions
if self.add_downsample:
_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> str:
_a = ()
for resnet, attn in zip(self.resnets , self.attentions ):
_a = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
_a = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
output_states += (hidden_states,)
if self.add_downsample:
_a = self.downsamplers_a(__magic_name__ )
output_states += (hidden_states,)
return hidden_states, output_states
class a ( nn.Module ):
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 0.0
_lowerCAmelCase = 1
_lowerCAmelCase = True
_lowerCAmelCase = jnp.floataa
def __UpperCAmelCase ( self ) -> Optional[Any]:
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
_a = resnets
if self.add_downsample:
_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Union[str, Any]:
_a = ()
for resnet in self.resnets:
_a = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
output_states += (hidden_states,)
if self.add_downsample:
_a = self.downsamplers_a(__magic_name__ )
output_states += (hidden_states,)
return hidden_states, output_states
class a ( nn.Module ):
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 0.0
_lowerCAmelCase = 1
_lowerCAmelCase = 1
_lowerCAmelCase = True
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = jnp.floataa
def __UpperCAmelCase ( self ) -> Tuple:
_a = []
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_a = self.prev_output_channel if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
_a = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__magic_name__ )
_a = resnets
_a = attentions
if self.add_upsample:
_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> List[str]:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
_a = res_hidden_states_tuple[-1]
_a = res_hidden_states_tuple[:-1]
_a = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_a = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
_a = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
if self.add_upsample:
_a = self.upsamplers_a(__magic_name__ )
return hidden_states
class a ( nn.Module ):
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 42
_lowerCAmelCase = 0.0
_lowerCAmelCase = 1
_lowerCAmelCase = True
_lowerCAmelCase = jnp.floataa
def __UpperCAmelCase ( self ) -> str:
_a = []
for i in range(self.num_layers ):
_a = self.in_channels if (i == self.num_layers - 1) else self.out_channels
_a = self.prev_output_channel if i == 0 else self.out_channels
_a = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
_a = resnets
if self.add_upsample:
_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Dict:
for resnet in self.resnets:
# pop res hidden states
_a = res_hidden_states_tuple[-1]
_a = res_hidden_states_tuple[:-1]
_a = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
_a = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
if self.add_upsample:
_a = self.upsamplers_a(__magic_name__ )
return hidden_states
class a ( nn.Module ):
_lowerCAmelCase = 42
_lowerCAmelCase = 0.0
_lowerCAmelCase = 1
_lowerCAmelCase = 1
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = jnp.floataa
def __UpperCAmelCase ( self ) -> int:
# there is always at least one resnet
_a = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
_a = []
for _ in range(self.num_layers ):
_a = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__magic_name__ )
_a = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
_a = resnets
_a = attentions
def __call__( self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Dict:
_a = self.resnets[0](__magic_name__ , __magic_name__ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
_a = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
_a = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
return hidden_states
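# Note on the skip-connection plumbing above: each down block appends its
# per-layer hidden states to output_states, and the matching up block pops them
# off res_hidden_states_tuple in reverse (last-in, first-out) order,
# concatenating each with the upsampled features along the channel (last) axis.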
| 104 |
'''simple docstring'''
def print_max_activities (start :list[int] , finish :list[int] ) -> None:
    '''simple docstring'''
    n = len(finish )
    print('The following activities are selected:' )
    # The first activity is always selected
    i = 0
    print(i , end=',' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j , end=',' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ : List[str] = [1, 3, 0, 5, 8, 5]
a_ : str = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
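# For the sample data above the greedy scan prints 0,1,3,4, (the finish times
# are already sorted ascending, which the greedy selection relies on).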
| 104 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = BlipImageProcessor()
A__ = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
A__ = BlipProcessor(__lowerCamelCase,__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).tokenizer
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
        image_inputs = [np.random.randint(255,size=(3, 30, 400),dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x,0,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
A__ = BlipProcessor(tokenizer=self.get_tokenizer(),image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token='''(BOS)''',eos_token='''(EOS)''' )
A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
A__ = BlipProcessor.from_pretrained(
self.tmpdirname,bos_token='''(BOS)''',eos_token='''(EOS)''',do_normalize=__lowerCamelCase,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(),tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer,__lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=__lowerCamelCase,image_processor=__lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=__lowerCamelCase,image_processor=__lowerCamelCase )
A__ = '''lower newer'''
A__ = processor(text=__lowerCamelCase )
A__ = tokenizer(__lowerCamelCase,return_token_type_ids=__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key],encoded_processor[key] )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=__lowerCamelCase,image_processor=__lowerCamelCase )
A__ = '''lower newer'''
A__ = self.prepare_image_inputs()
A__ = processor(text=__lowerCamelCase,images=__lowerCamelCase )
self.assertListEqual(list(inputs.keys() ),['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(__lowerCamelCase ):
processor()
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=__lowerCamelCase,image_processor=__lowerCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.batch_decode(__lowerCamelCase )
A__ = tokenizer.batch_decode(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = BlipProcessor(tokenizer=__lowerCamelCase,image_processor=__lowerCamelCase )
A__ = '''lower newer'''
A__ = self.prepare_image_inputs()
A__ = processor(text=__lowerCamelCase,images=__lowerCamelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ),['''pixel_values''', '''input_ids''', '''attention_mask'''] )
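# End-to-end shape of the processor (mirrors the tests above; the checkpoint is the
# tiny internal test model, not a production one):
#
#   processor = BlipProcessor(BlipImageProcessor(),
#                             BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel"))
#   inputs = processor(text="lower newer", images=images)
#   # -> dict with "pixel_values", "input_ids", "attention_mask"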
| 193 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample
        return sample
    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)
    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
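    # The "parallel" part of DDIMParallelScheduler is `batch_step_no_noise`, exercised
    # in test_batch_step_no_noise above: it denoises a flattened batch of samples at
    # several timesteps in a single call instead of stepping one timestep at a time.
    # Call shape (mirroring the test; not a complete sampler):
    #
    #   residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
    #   prev = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1),
    #                                        samples.flatten(0, 1), eta)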
| 193 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"
    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True, classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
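# Quick usage sketch: the defaults above mirror the base checkpoint's shape, and any
# field can be overridden at construction time, e.g.
#
#   config = LukeConfig(entity_emb_size=128, use_entity_aware_attention=False)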
| 353 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
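    # Shared loop shape in the tests above: Euler-type schedulers keep samples at a
    # sigma-dependent scale, so the initial noise is multiplied by
    # `scheduler.init_noise_sigma` and every model call goes through
    # `scheduler.scale_model_input(sample, t)`:
    #
    #   sample = noise * scheduler.init_noise_sigma
    #   for t in scheduler.timesteps:
    #       model_input = scheduler.scale_model_input(sample, t)
    #       sample = scheduler.step(model(model_input, t), t, sample).prev_sample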
| 139 | 0 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def lowercase ( A_ , A_ )-> str:
'''simple docstring'''
a : List[Any] = BeautifulSoup(requests.get(A_ , params=A_ ).content , "html.parser" )
a : Tuple = soup.find("div" , attrs={"class": "gs_ri"} )
a : Any = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 30,
"""pages""": """3979-3990""",
"""year""": 2018,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 40 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
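# Conceptually, LocalSGD lets each worker take K purely local optimizer steps and
# then synchronizes by averaging model parameters across workers, instead of
# all-reducing gradients on every step. A hand-rolled sketch of that averaging
# step (illustrative only; the `LocalSGD` helper used below does this internally):
#
#   import torch.distributed as dist
#
#   def average_parameters(model):
#       world_size = dist.get_world_size()
#       for p in model.parameters():
#           dist.all_reduce(p.data, op=dist.ReduceOp.SUM)
#           p.data /= world_size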
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'])
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt')
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
        config['num_epochs'] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError('LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)')
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None
        ) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:', eval_metric)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.')
    # New Code #
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=1, help='The number of minibatches to be ran before gradients are accumulated.')
    parser.add_argument(
        '--local_sgd_steps', type=int, default=8, help='Number of local SGD steps or None to disable local SGD')
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 218 | 0 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
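# Buffer sizing: `input_characters = seq_length * chars_per_token * num_of_sequences`
# estimates how many raw characters must be buffered to yield roughly
# `num_of_sequences` fixed-length examples. With the defaults above
# (1024 tokens, ~3.6 chars/token, 1024 sequences) that is ~3.77M characters per refill.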
def create_dataloader(args):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
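# Perplexity is exp(mean token-level cross-entropy): e.g. a mean loss of 2.0 gives
# exp(2.0) ~= 7.39. The try/except above guards against overflow for very large losses.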
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate(args)
logger.info(F"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
| 357 |
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class TFMobileBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFMobileBertModel,
            TFMobileBertForMaskedLM,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFMobileBertModel,
            'fill-mask': TFMobileBertForMaskedLM,
            'question-answering': TFMobileBertForQuestionAnswering,
            'text-classification': TFMobileBertForSequenceClassification,
            'token-classification': TFMobileBertForTokenClassification,
            'zero-shot': TFMobileBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict['next_sentence_label'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
        return inputs_dict
    class TFMobileBertModelTester(object):
        def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, embedding_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_input_mask = use_input_mask
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.intermediate_size = intermediate_size
            self.hidden_act = hidden_act
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.num_choices = num_choices
            self.scope = scope
            self.embedding_size = embedding_size
        def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_mask = None
            if self.use_input_mask:
                input_mask = random_attention_mask([self.batch_size, self.seq_length])
            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)
            config = MobileBertConfig(
                vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, embedding_size=self.embedding_size)
            return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
        def create_and_check_mobilebert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertModel(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            inputs = [input_ids, input_mask]
            result = model(inputs)
            result = model(input_ids)
            self.parent.assertEqual(
                result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
            self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
        def create_and_check_mobilebert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForMaskedLM(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        def create_and_check_mobilebert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForNextSentencePrediction(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForPreTraining(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(
                result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
            self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))
        def create_and_check_mobilebert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForSequenceClassification(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        def create_and_check_mobilebert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_choices = self.num_choices
            model = TFMobileBertForMultipleChoice(config=config)
            multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
            multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
            multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
            inputs = {
                'input_ids': multiple_choice_inputs_ids,
                'attention_mask': multiple_choice_input_mask,
                'token_type_ids': multiple_choice_token_type_ids,
            }
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
        def create_and_check_mobilebert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            config.num_labels = self.num_labels
            model = TFMobileBertForTokenClassification(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
        def create_and_check_mobilebert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
            model = TFMobileBertForQuestionAnswering(config=config)
            inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
            result = model(inputs)
            self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
            self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        def prepare_config_and_inputs_for_common(self):
            config_and_inputs = self.prepare_config_and_inputs()
            (
                config,
                input_ids,
                token_type_ids,
                input_mask,
                sequence_labels,
                token_labels,
                choice_labels,
            ) = config_and_inputs
            inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
            return config, inputs_dict
    def setUp(self):
        self.model_tester = TFMobileBertModelTest.TFMobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)
    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)
    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["google/mobilebert-uncased"]:
            model = TFMobileBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFMobileBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 30522]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-4.5919547, -9.248295, -9.645256],
                    [-6.7306175, -6.440284, -6.6052837],
                    [-7.2743506, -6.7847915, -6.024673],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 50 | 0 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes) -> None:
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge(self, u_node, v_node, weight) -> None:
        self.m_edges.append([u_node, v_node, weight])
    def find_component(self, u_node) -> int:
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])
    def set_component(self, u_node) -> None:
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)
    def union(self, component_size, u_node, v_node) -> None:
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)
    def boruvka(self) -> None:
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n')
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f'The total weight of the minimal spanning tree is: {mst_weight}')
def snake_case_ ( ) -> None:
pass
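# Tiny usage example (hypothetical graph): the MST of a weighted triangle.
def demo_boruvka() -> None:
    g = Graph(3)
    g.add_edge(0, 1, 5)
    g.add_edge(1, 2, 1)
    g.add_edge(0, 2, 10)
    # Boruvka keeps edges (1-2, w=1) and (0-1, w=5); total printed weight is 6.
    g.boruvka()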
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_encodec'''] = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
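    # This is Transformers' lazy-import convention: `_import_structure` declares what
    # each submodule exports, the TYPE_CHECKING branch gives static type checkers real
    # imports, and `_LazyModule` defers the heavy imports until first attribute access.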
| 196 | 1 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)
    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self
    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
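# How the transfer works: `Tracker` hooks every module, runs one forward pass, and
# records only "leaf" operations (modules without submodules, plus conv/batchnorm)
# in execution order. Copying weights then reduces to zipping the two traced lists,
# which is why `ModuleTransfer` below insists both traces have the same length.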
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.')
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')
def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(F'Converting {name}...')
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet"))}'
    print(checkpoint_name)
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add model", use_temp_dir=True)
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name, commit_message="Add image processor", use_temp_dir=True)
        print(F'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"),
    }
    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help=(
'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=Path,
required=True,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
default=True,
type=bool,
required=False,
help='If True, push model and image processor to the hub.',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 352 |
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """
    Choose a random pivot from the list.
    """
    return choice(lst)
def kth_number(lst: list[int], k: int) -> int:
    """
    Return the kth smallest number in lst.
    """
    pivot = random_pivot(lst)
    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]
    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
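# Example (distinct values; elements equal to the pivot are dropped by the strict
# comparisons above, so duplicates can shift the result):
#
#   kth_number([2, 1, 3, 4, 5], 3)  # -> 3, the 3rd smallest element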
if __name__ == "__main__":
import doctest
doctest.testmod()
| 189 | 0 |
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, embed_dim=[16, 48, 96], num_heads=[1, 3, 6], depth=[1, 2, 10], patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], stride_kv=[2, 2, 2], cls_token=[False, False, True], attention_drop_rate=[0.0, 0.0, 0.0], initializer_range=0.02, layer_norm_eps=1e-12, is_training=True, use_labels=True, num_labels=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
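        # The loop above applies the standard conv output-size formula,
        # out = floor((in + 2*pad - kernel) / stride) + 1, once per stage. E.g. stage 0
        # with the tester defaults: floor((64 + 2*2 - 7) / 4) + 1 = 15 + 1 = 16.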
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
def lowercase ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase ( self : Optional[Any] ):
return
@unittest.skip(reason="Cvt does not output attentions" )
def lowercase ( self : Tuple ):
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def lowercase ( self : int ):
pass
def lowercase ( self : str ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowercase ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowercase ( self : Optional[int] ):
def check_hidden_states_output(snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : int ):
_UpperCAmelCase = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case_ ) , snake_case_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowercase ( self : Dict ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase ( self : Dict ):
pass
@slow
def lowercase ( self : Optional[int] ):
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def UpperCAmelCase_ ( ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Any ):
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase ( self : Union[str, Any] ):
_UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case_ , return_tensors="pt" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case_ )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_UpperCAmelCase = torch.tensor([0.9_2_8_5, 0.9_0_1_5, -0.3_1_5_0] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1e-4 ) )
| 22 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar('''T''')
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]):
        any_type: Any | T = None
        self.N = len(arr)
        # iterative segment tree: leaves live at indices N..2N-1, internal nodes at 1..N-1
        self.st = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()
    def build(self):
        # combine children bottom-up: node p aggregates nodes 2p and 2p+1
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def update(self, p: int, v: T):
        # point update: write the leaf, then recompute every ancestor up to the root
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])
    def query(self, l: int, r: int):  # noqa: E741
        # inclusive range query over [l, r], walking both borders toward the root
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
def test_all_segments() -> None:
    '''simple docstring'''
    # cross-check every inclusive range [i, j] against a plain reduce over the same slice
    for i in range(len(test_array)):
        for j in range(i, len(test_array)):
            min_range = reduce(min, test_array[i : j + 1])
            max_range = reduce(max, test_array[i : j + 1])
            sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
            assert min_range == min_segment_tree.query(i, j)
            assert max_range == max_segment_tree.query(i, j)
            assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
    test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
| 22 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class _snake_case ( lowercase__):
UpperCamelCase__ : Tuple ="""rwkv"""
UpperCamelCase__ : str ={"""max_position_embeddings""": """context_length"""}
    def __init__( self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # fall back to hidden_size for the attention width and 4x hidden_size for the FFN width
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
| 224 |
def exchange_sort(numbers):
    length = len(numbers)
    # compare each element against every later one and swap when out of order (O(n^2))
    for i in range(length):
        for j in range(i + 1, length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(exchange_sort(unsorted))
| 224 | 1 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
lowerCAmelCase__ : List[str] = open # noqa: we just need to have a builtin inside this module to test it properly
| 98 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
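    # rewrite each key of the original checkpoint into its transformers-style name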
lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
lowerCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
lowerCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowerCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowerCAmelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(SCREAMING_SNAKE_CASE )-1}' )
if "norm" in key:
lowerCAmelCase = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowerCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
lowerCAmelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(SCREAMING_SNAKE_CASE )-1}' )
if "layer_norm1" in key:
lowerCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowerCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowerCAmelCase = key[key.find("""block""" ) + len("""block""" )]
lowerCAmelCase = key.replace(F'block{idx}' , F'block.{int(SCREAMING_SNAKE_CASE )-1}' )
if "attn.q" in key:
lowerCAmelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowerCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowerCAmelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowerCAmelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowerCAmelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowerCAmelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowerCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowerCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowerCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowerCAmelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(SCREAMING_SNAKE_CASE )-1}' )
if "bot_conv" in key:
lowerCAmelCase = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
lowerCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
lowerCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
lowerCAmelCase = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
lowerCAmelCase = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
lowerCAmelCase = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
lowerCAmelCase = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
lowerCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" )
lowerCAmelCase = value
return new_state_dict
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
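        # the second half of the fused kv matrix holds the value projection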
lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def UpperCAmelCase__ ( ):
'''simple docstring'''
lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any]=False , SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
'''simple docstring'''
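    # SegFormer-style hierarchical encoder sizes matching the released GLPN checkpoints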
lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
lowerCAmelCase = GLPNImageProcessor()
# prepare image
lowerCAmelCase = prepare_img()
lowerCAmelCase = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
lowerCAmelCase = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device("""cpu""" ) )
# rename keys
lowerCAmelCase = rename_keys(SCREAMING_SNAKE_CASE )
# key and value matrices need special treatment
read_in_k_v(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowerCAmelCase = GLPNForDepthEstimation(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# forward pass
lowerCAmelCase = model(SCREAMING_SNAKE_CASE )
lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
lowerCAmelCase = torch.tensor(
[[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
elif "kitti" in model_name:
lowerCAmelCase = torch.tensor(
[[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
lowerCAmelCase = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path",
default=None,
type=str,
help="Path to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
)
parser.add_argument(
"--model_name",
default="glpn-kitti",
type=str,
help="Name of the model in case you're pushing to the hub.",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 46 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    '''simple docstring'''
    def decorator(func):
        handle = getattr(func, """handle_key""", [])
        handle += [key]
        setattr(func, """handle_key""", handle)
        return func
    return decorator
def mark_multiple(*keys: List[str]):
    '''simple docstring'''
    def decorator(func):
        handle = getattr(func, """handle_key""", [])
        handle += keys
        setattr(func, """handle_key""", handle)
        return func
    return decorator
class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, """key_handler"""):
            setattr(new_cls, """key_handler""", {})
        setattr(new_cls, """handle_input""", KeyHandler.handle_input)
        # register every method tagged through mark()/mark_multiple() under its key code
        for value in attrs.values():
            handled_keys = getattr(value, """handle_key""", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls
    @staticmethod
    def handle_input(cls):
        # read one key press and dispatch to the registered handler, if any
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None
def register(cls):
    '''simple docstring'''
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
| 20 |
import random
def _partition(data: list, pivot) -> tuple:
    '''simple docstring'''
    # three-way partition: elements below, equal to, and above the pivot
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater
def quick_select(items: list, index: int):
    '''simple docstring'''
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)
    # invalid input
    if index >= len(items) or index < 0:
        return None
    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)
    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 20 | 1 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """simple docstring"""
    # iterate the sqrt(2) continued-fraction convergents: (num, den) -> (num + 2*den, num + den);
    # count the expansions whose numerator has more digits than its denominator
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)
if __name__ == "__main__":
print(f'{solution() = }')
| 104 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''',
}
class lowercase_ (lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = 'convnextv2'
def __init__( self : Tuple ,lowercase__ : Dict=3 ,lowercase__ : int=4 ,lowercase__ : Optional[int]=4 ,lowercase__ : Optional[Any]=None ,lowercase__ : List[str]=None ,lowercase__ : Tuple="gelu" ,lowercase__ : Dict=0.0_2 ,lowercase__ : List[Any]=1e-1_2 ,lowercase__ : Optional[int]=0.0 ,lowercase__ : int=2_2_4 ,lowercase__ : int=None ,lowercase__ : Tuple=None ,**lowercase__ : List[str] ,):
super().__init__(**lowercase__ )
__lowercase = num_channels
__lowercase = patch_size
__lowercase = num_stages
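        # default to the ConvNeXt-Tiny stage layout when hidden_sizes/depths are not provided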
__lowercase = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
__lowercase = [3, 3, 9, 3] if depths is None else depths
__lowercase = hidden_act
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = drop_path_rate
__lowercase = image_size
__lowercase = ['''stem'''] + [F"stage{idx}" for idx in range(1 ,len(self.depths ) + 1 )]
__lowercase , __lowercase = get_aligned_output_features_output_indices(
out_features=lowercase__ ,out_indices=lowercase__ ,stage_names=self.stage_names )
| 104 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
UpperCAmelCase = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
UpperCAmelCase = get_tests_dir("""fixtures/vocab.json""")
UpperCAmelCase = get_tests_dir("""fixtures""")
class UpperCAmelCase_ ( unittest.TestCase):
snake_case__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
def _UpperCamelCase ( self : List[Any] ) -> List[str]:
_UpperCamelCase = 0
def _UpperCamelCase ( self : Tuple ) -> List[Any]:
_UpperCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Tuple ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = WavaVecaConfig()
_UpperCamelCase = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
_UpperCamelCase = AutoProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Any ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(__UpperCamelCase , os.path.join(__UpperCamelCase , __UpperCamelCase ) )
copyfile(__UpperCamelCase , os.path.join(__UpperCamelCase , '''vocab.json''' ) )
_UpperCamelCase = AutoProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = WavaVecaFeatureExtractor()
_UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
_UpperCamelCase = WavaVecaProcessor(__UpperCamelCase , __UpperCamelCase )
# save in new folder
processor.save_pretrained(__UpperCamelCase )
# drop `processor_class` in tokenizer
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , '''r''' ) as f:
_UpperCamelCase = json.load(__UpperCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , '''w''' ) as f:
f.write(json.dumps(__UpperCamelCase ) )
_UpperCamelCase = AutoProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : str ) -> str:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = WavaVecaFeatureExtractor()
_UpperCamelCase = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
_UpperCamelCase = WavaVecaProcessor(__UpperCamelCase , __UpperCamelCase )
# save in new folder
processor.save_pretrained(__UpperCamelCase )
# drop `processor_class` in feature extractor
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , '''r''' ) as f:
_UpperCamelCase = json.load(__UpperCamelCase )
config_dict.pop('''processor_class''' )
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , '''w''' ) as f:
f.write(json.dumps(__UpperCamelCase ) )
_UpperCamelCase = AutoProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(__UpperCamelCase )
# copy relevant files
copyfile(__UpperCamelCase , os.path.join(__UpperCamelCase , '''vocab.json''' ) )
# create emtpy sample processor
with open(os.path.join(__UpperCamelCase , __UpperCamelCase ) , '''w''' ) as f:
f.write('''{}''' )
_UpperCamelCase = AutoProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> Any:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__UpperCamelCase ):
_UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__UpperCamelCase ):
_UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__UpperCamelCase )
_UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__UpperCamelCase )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
_UpperCamelCase = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
_UpperCamelCase = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
_UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__UpperCamelCase , use_fast=__UpperCamelCase )
_UpperCamelCase = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def _UpperCamelCase ( self : Any ) -> Tuple:
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase , slow_tokenizer_class=__UpperCamelCase )
AutoProcessor.register(__UpperCamelCase , __UpperCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__UpperCamelCase ):
AutoProcessor.register(__UpperCamelCase , __UpperCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
_UpperCamelCase = CustomFeatureExtractor.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(__UpperCamelCase , '''vocab.txt''' )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_UpperCamelCase = CustomTokenizer(__UpperCamelCase )
_UpperCamelCase = CustomProcessor(__UpperCamelCase , __UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(__UpperCamelCase )
_UpperCamelCase = AutoProcessor.from_pretrained(__UpperCamelCase )
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCamelCase ( self : Tuple ) -> List[str]:
class UpperCAmelCase_ ( _lowercase):
snake_case__ = False
class UpperCAmelCase_ ( _lowercase):
snake_case__ = False
class UpperCAmelCase_ ( _lowercase):
snake_case__ = '''AutoFeatureExtractor'''
snake_case__ = '''AutoTokenizer'''
snake_case__ = False
try:
AutoConfig.register('''custom''' , __UpperCamelCase )
AutoFeatureExtractor.register(__UpperCamelCase , __UpperCamelCase )
AutoTokenizer.register(__UpperCamelCase , slow_tokenizer_class=__UpperCamelCase )
AutoProcessor.register(__UpperCamelCase , __UpperCamelCase )
# If remote code is not set, the default is to use local classes.
_UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
_UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
_UpperCamelCase = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=__UpperCamelCase )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _UpperCamelCase ( self : int ) -> List[str]:
_UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def _UpperCamelCase ( self : List[str] ) -> str:
_UpperCamelCase = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase):
snake_case__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def _UpperCamelCase ( cls : str ) -> Any:
_UpperCamelCase = TOKEN
HfFolder.save_token(__UpperCamelCase )
@classmethod
def _UpperCamelCase ( cls : List[str] ) -> int:
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def _UpperCamelCase ( self : Any ) -> Optional[Any]:
_UpperCamelCase = WavaVecaProcessor.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__UpperCamelCase , '''test-processor''' ) , push_to_hub=__UpperCamelCase , use_auth_token=self._token )
_UpperCamelCase = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(new_processor.feature_extractor , __UpperCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCamelCase ( self : Dict ) -> Any:
_UpperCamelCase = WavaVecaProcessor.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(__UpperCamelCase , '''test-processor-org''' ) , push_to_hub=__UpperCamelCase , use_auth_token=self._token , organization='''valid_org''' , )
_UpperCamelCase = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(__UpperCamelCase , getattr(new_processor.feature_extractor , __UpperCamelCase ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
_UpperCamelCase = CustomFeatureExtractor.from_pretrained(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCamelCase = os.path.join(__UpperCamelCase , '''vocab.txt''' )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
_UpperCamelCase = CustomTokenizer(__UpperCamelCase )
_UpperCamelCase = CustomProcessor(__UpperCamelCase , __UpperCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'''{USER}/test-dynamic-processor''' , token=self._token )
_UpperCamelCase = Repository(__UpperCamelCase , clone_from=F'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(__UpperCamelCase )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(__UpperCamelCase , '''tokenizer_config.json''' ) ) as f:
_UpperCamelCase = json.load(__UpperCamelCase )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(__UpperCamelCase , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__UpperCamelCase , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(__UpperCamelCase , '''custom_processing.py''' ) ) )
repo.push_to_hub()
_UpperCamelCase = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''' , trust_remote_code=__UpperCamelCase )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
| 54 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class UpperCAmelCase_ ( _lowercase):
def __init__( self : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict=None , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=None , **__UpperCamelCase : Any ) -> Dict:
_UpperCamelCase = parent
_UpperCamelCase = config_class
_UpperCamelCase = has_text_modality
_UpperCamelCase = kwargs
_UpperCamelCase = common_properties
def _UpperCamelCase ( self : Optional[Any] ) -> List[str]:
_UpperCamelCase = self.config_class(**self.inputs_dict )
_UpperCamelCase = (
['''hidden_size''', '''num_attention_heads''', '''num_hidden_layers''']
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(['''vocab_size'''] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(__UpperCamelCase , __UpperCamelCase ) , msg=F'''`{prop}` does not exist''' )
# Test that config has the common properties as setter
for idx, name in enumerate(__UpperCamelCase ):
try:
setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.parent.assertEqual(
getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , msg=F'''`{name} value {idx} expected, but was {getattr(__UpperCamelCase , __UpperCamelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(__UpperCamelCase ):
try:
_UpperCamelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase , msg=F'''`{name} value {idx} expected, but was {getattr(__UpperCamelCase , __UpperCamelCase )}''' )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def _UpperCamelCase ( self : Any ) -> List[str]:
_UpperCamelCase = self.config_class(**self.inputs_dict )
_UpperCamelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , __UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] ) -> Tuple:
_UpperCamelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = os.path.join(__UpperCamelCase , '''config.json''' )
config_first.to_json_file(__UpperCamelCase )
_UpperCamelCase = self.config_class.from_json_file(__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCamelCase ( self : int ) -> List[str]:
_UpperCamelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(__UpperCamelCase )
_UpperCamelCase = self.config_class.from_pretrained(__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCamelCase ( self : Dict ) -> Any:
_UpperCamelCase = self.config_class(**self.inputs_dict )
_UpperCamelCase = '''test'''
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCamelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
config_first.save_pretrained(__UpperCamelCase )
_UpperCamelCase = self.config_class.from_pretrained(__UpperCamelCase , subfolder=__UpperCamelCase )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def _UpperCamelCase ( self : Dict ) -> int:
_UpperCamelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
_UpperCamelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def _UpperCamelCase ( self : Any ) -> str:
if self.config_class.is_composition:
return
_UpperCamelCase = self.config_class()
self.parent.assertIsNotNone(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
_UpperCamelCase = copy.deepcopy(__UpperCamelCase )
_UpperCamelCase = self.config_class(**__UpperCamelCase )
_UpperCamelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
                    if config.torch_dtype != torch.float16:
                        wrong_values.append(('''torch_dtype''', config.torch_dtype, torch.float16) )
elif getattr(__UpperCamelCase , __UpperCamelCase ) != value:
wrong_values.append((key, getattr(__UpperCamelCase , __UpperCamelCase ), value) )
if len(__UpperCamelCase ) > 0:
_UpperCamelCase = '''\n'''.join([F'''- {v[0]}: got {v[1]} instead of {v[2]}''' for v in wrong_values] )
raise ValueError(F'''The following keys were not properly set in the config:\n{errors}''' )
def _UpperCamelCase ( self : Tuple ) -> int:
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
| 54 | 1 |
'''simple docstring'''
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class FixedPriorityQueue:
    def __init__(self):
        # one FIFO queue per priority level; index 0 is served first
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("""Maximum queue size is 100""")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("""Valid priorities are 0, 1, and 2""")
    def dequeue(self) -> int:
        # serve the first element of the highest-priority non-empty queue
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("""All queues are empty""")
    def __str__(self):
        return "\n".join(F'Priority {i}: {q}' for i, q in enumerate(self.queues))
class ElementPriorityQueue:
    def __init__(self):
        self.queue = []
    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("""Maximum queue size is 100""")
        self.queue.append(data)
    def dequeue(self) -> int:
        # the smallest element has the highest priority
        if not self.queue:
            raise UnderFlowError("""The queue is empty""")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data
    def __str__(self):
        return str(self.queue)
def fixed_priority_queue() -> None:
    '''simple docstring'''
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
def element_priority_queue() -> None:
    '''simple docstring'''
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
| 206 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('''cuda''')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
| 206 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
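# plain-English language names mapped to their NLLB-200 FLORES-200 codes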
LANGUAGE_CODES = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class TranslationTool(PipelineTool):
    default_checkpoint = """facebook/nllb-200-distilled-600M"""
    description = (
        """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
        """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
        """which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in """
        """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
    )
    name = """translator"""
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ["""text""", """text""", """text"""]
    outputs = ["""text"""]
    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(F'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'{tgt_lang} is not a supported language.')
        # translate the plain-English names to FLORES codes before building model inputs
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang )
    def forward(self, inputs):
        return self.model.generate(**inputs)
    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
| 370 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = OpenAIGPTConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args):
        model = OpenAIGPTDoubleHeadsModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_openai_gpt_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = OpenAIGPTForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "head_mask": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenAIGPTModel,
            "text-classification": OpenAIGPTForSequenceClassification,
            "text-generation": OpenAIGPTLMHeadModel,
            "zero-shot": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
            # tiny config could not be created.
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, )
                inputs_dict["input_ids"] = inputs_dict["labels"]
                inputs_dict["token_type_ids"] = inputs_dict["labels"]
                inputs_dict["mc_token_ids"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, )
                inputs_dict["mc_labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
        return inputs_dict
    def setUp(self):
        self.model_tester = OpenAIGPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_openai_gpt_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs)
    def test_openai_gpt_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
    def test_openai_gpt_double_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs)
    def test_openai_gpt_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_openai_gpt(self):
        model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
        model.to(torch_device)
        input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device)  # the president is
        expected_output_ids = [
            481,
            4735,
            544,
            246,
            963,
            870,
            762,
            239,
            244,
            40477,
            244,
            249,
            719,
            881,
            487,
            544,
            240,
            244,
            603,
            481,
        ]  # the president is a very good man. " \n " i'm sure he is, " said the
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
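# A minimal standalone sketch (not part of the original test file) of the greedy
# generation pattern the test above exercises; the tokenizer-based encoding is an
# assumption, since the test hardcodes the token ids instead:
#
#     from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer
#
#     tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
#     model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
#     input_ids = tokenizer("the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding
#     print(tokenizer.decode(output_ids[0]))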
| 169 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
        "GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXForCausalLM",
        "GPTNeoXForQuestionAnswering",
        "GPTNeoXForSequenceClassification",
        "GPTNeoXForTokenClassification",
        "GPTNeoXLayer",
        "GPTNeoXModel",
        "GPTNeoXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
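# A minimal sketch (not from the file above) of what the lazy-init pattern buys:
# `_LazyModule` replaces the package module in `sys.modules`, so heavyweight
# submodules are only imported on first attribute access. The same idea with a
# plain PEP 562 module-level `__getattr__`; names here are illustrative:
#
#     import importlib
#
#     _import_structure = {"modeling": ["MyModel"]}
#
#     def __getattr__(name):
#         for module_name, exported in _import_structure.items():
#             if name in exported:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")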
| 104 |
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
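# A minimal usage sketch (not part of the test file above): the feature extractor
# turns a raw 16 kHz waveform into the (80, 3000) log-mel spectrogram Whisper expects.
# The random waveform below is an illustrative stand-in for real audio:
#
#     import numpy as np
#     from transformers import WhisperFeatureExtractor
#
#     feature_extractor = WhisperFeatureExtractor()          # defaults: 80 mel bins, 30 s window
#     waveform = np.random.randn(16000).astype(np.float32)   # 1 s of fake audio at 16 kHz
#     features = feature_extractor(waveform, sampling_rate=16000, return_tensors="np")
#     print(features.input_features.shape)                   # (1, 80, 3000), padded/truncated to 30 s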
| 104 | 1 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(examples, out_file, model_name, batch_size=8, device=DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs, ):
    """Save model.generate results to out_file, and return timing statistics."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
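    # Usage for summarization (an illustrative invocation; paths are placeholders):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt \
    #     --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 32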
| 367 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '''
import os
'''
IMPORT_IN_FUNCTION = '''
def foo():
import os
return False
'''
DEEPLY_NESTED_IMPORT = '''
def foo():
def bar():
if True:
import os
return False
return bar()
'''
TOP_LEVEL_TRY_IMPORT = '''
import os
try:
import bar
except ImportError:
raise ValueError()
'''
TRY_IMPORT_IN_FUNCTION = '''
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
'''
MULTIPLE_EXCEPTS_IMPORT = '''
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
'''
EXCEPT_AS_IMPORT = '''
import os
try:
import bar
except ImportError as e:
raise ValueError()
'''
GENERIC_EXCEPT_IMPORT = '''
import os
try:
import bar
except:
raise ValueError()
'''
MULTILINE_TRY_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
'''
MULTILINE_BOTH_IMPORT = '''
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
'''
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize("case", CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
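# Quick illustration (not part of the test file) of what `get_imports` returns:
# it statically collects top-level module names while skipping imports guarded by
# try/except ImportError, which is why every case above reduces to ["os"]:
#
#     from transformers.dynamic_module_utils import get_imports
#
#     with open("some_module.py", "w") as f:        # "some_module.py" is a placeholder path
#         f.write("import os\nimport numpy\ntry:\n    import bar\nexcept ImportError:\n    pass\n")
#     print(sorted(get_imports("some_module.py")))  # ['numpy', 'os'] (order is not guaranteed)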
| 314 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handle negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handle values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 139 | 0 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """Wraps a torch LR scheduler so it only steps when its optimizers actually stepped."""
    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)
    # Passthroughs to the wrapped scheduler
    def get_last_lr(self):
        return self.scheduler.get_last_lr()
    def state_dict(self):
        return self.scheduler.state_dict()
    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)
    def get_lr(self):
        return self.scheduler.get_lr()
    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
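# A minimal sketch (not from the file above) of wrapping a torch scheduler; the
# optimizer/scheduler setup is illustrative, and note that `step_was_skipped`
# normally comes from accelerate's AcceleratedOptimizer wrapper, so we bypass the
# optimizer-linked path with step_with_optimizer=False here:
#
#     import torch
#
#     model = torch.nn.Linear(4, 2)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=False)
#     scheduler.step()                 # steps the wrapped StepLR directly
#     print(scheduler.get_last_lr())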
| 344 |
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein) -> Dict[str, torch.Tensor]:
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types])
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device, )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device, )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein
def make_atom14_masks_np(batch) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
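# A minimal usage sketch (not from the file above): the only required input key is
# "aatype", a [num_res] tensor of residue-type indices; the function adds the dense
# atom14 <-> atom37 index maps and existence masks in place and returns the dict:
#
#     import torch
#
#     protein = {"aatype": torch.tensor([0, 7, 20])}   # e.g. ALA, GLY, UNK (illustrative indices)
#     protein = make_atom14_masks(protein)
#     print(protein["residx_atom14_to_atom37"].shape)  # torch.Size([3, 14])
#     print(protein["atom37_atom_exists"].shape)       # torch.Size([3, 37])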
| 344 | 1 |
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def test_patch_submodule():
"""simple docstring"""
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def test_patch_submodule_builtin():
"""simple docstring"""
assert _test_patching.open is open
    mock = "__test_patch_submodule_builtin_mock__"
# _test_patching has "open" in its globals
assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def test_patch_submodule_missing():
    """Patching a submodule the target module never imports is a no-op."""
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass
def test_patch_submodule_missing_builtin():
    """Patching a builtin the target module never references explicitly."""
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len
def test_patch_submodule_start_and_stop():
    """The patch can also be driven manually via start()/stop()."""
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def test_patch_submodule_successive():
"""simple docstring"""
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def test_patch_submodule_doesnt_exist():
    """Patching attributes or modules that don't exist is a silent no-op."""
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
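# A minimal sketch (not from the test file) of the behaviour under test; `mymodule`
# is a hypothetical module that does `import os` at its top level:
#
#     from datasets.utils.patching import patch_submodule
#     import mymodule
#
#     def fake_join(*parts):
#         return "/".join(parts).upper()
#
#     with patch_submodule(mymodule, "os.path.join", fake_join):
#         assert mymodule.os.path.join("a", "b") == "A/B"  # patched as seen from mymodule
#     import os
#     assert os.path.join("a", "b") == os.sep.join(("a", "b"))  # the real os is untouched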
| 94 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, d_ff=self.d_ff, d_kv=self.hidden_size // self.num_attention_heads, num_layers=self.num_hidden_layers, num_decoder_layers=self.decoder_layers, num_heads=self.num_attention_heads, relative_attention_num_buckets=self.relative_attention_num_buckets, dropout_rate=self.dropout_rate, initializer_factor=self.initializer_factor, eos_token_id=self.eos_token_id, bos_token_id=self.pad_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, )
    def create_and_check_model(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, head_mask, ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids, decoder_input_ids=decoder_input_ids, attention_mask=attention_mask, decoder_attention_mask=decoder_attention_mask, )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(self, config, input_ids, decoder_input_ids, attention_mask, decoder_attention_mask, head_mask, ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)
        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(self, config, input_dict, ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model, (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]), f"{tmpdirname}/t5_test.onnx", export_params=True, opset_version=9, input_names=["input_ids", "decoder_input_ids"], )
    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)
    def test_headmasking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)
        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }
        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device)
            out = model.generate(
                config_and_inputs[1]["input_ids"], num_beams=1, max_length=3, output_attentions=True, return_dict_in_generate=True, **head_masks, )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged")
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
[
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)
        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
'<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>',
'<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
'<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>',
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
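# A minimal usage sketch (not part of the test above) of UMT5 span-corruption
# inference, mirroring what the integration test checks:
#
#     from transformers import AutoTokenizer, UMT5ForConditionalGeneration
#
#     tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
#     model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small")
#     input_ids = tokenizer("A <extra_id_0> walks into a bar.", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids, max_new_tokens=20)
#     print(tokenizer.batch_decode(output_ids))  # the model proposes a filling for <extra_id_0>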
| 50 | 0 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline( DiffusionPipeline ):
    def __init__( self : Optional[Any] , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ) -> int:
        '''simple docstring'''
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
    @property
    def components( self : Optional[int] ) -> Dict[str, Any]:
        '''simple docstring'''
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('_' )}
    def enable_attention_slicing( self : Tuple , slice_size : Optional[Union[str, int]] = "auto" ) -> Any:
        '''simple docstring'''
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self : List[str] ) -> Tuple:
        '''simple docstring'''
        self.enable_attention_slicing(None )
    @torch.no_grad()
    def text2img_sd1_1( self : Union[str, Any] , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : Dict , ) -> str:
        '''simple docstring'''
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_2( self : Optional[int] , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : Any , ) -> Any:
        '''simple docstring'''
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_3( self : str , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : int , ) -> Dict:
        '''simple docstring'''
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def text2img_sd1_4( self : List[str] , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : Optional[int] , ) -> Optional[int]:
        '''simple docstring'''
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
    @torch.no_grad()
    def __call__( self : List[Any] , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs : int , ) -> List[str]:
        '''simple docstring'''
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
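# A hedged usage sketch (not part of the original pipeline file): how the comparison
# pipeline above might be constructed and called. Component loading is elided; in
# practice vae, text_encoder, etc. would come from one of the four checkpoints listed
# at the top of this file. The prompt is illustrative.
def _demo_comparison_pipeline(components ):  # hypothetical helper and argument
    pipe = StableDiffusionComparisonPipeline(**components )
    output = pipe(prompt='an astronaut riding a horse' , num_inference_steps=25 )
    return output.images  # the first result from each of the four checkpoints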
| 369 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest( TestCase ):
    def _create_dummy_dataset( self : Any ) -> Dataset:
        '''simple docstring'''
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
    def test_add_faiss_index( self : Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        import faiss
        dset : Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=False )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
        dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays( self : Tuple ) -> Any:
        '''simple docstring'''
        import faiss
        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores , examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization( self : List[Any] ) -> Dict:
        '''simple docstring'''
        import faiss
        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores , examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index( self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        dset : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index( self : List[str] ) -> List[str]:
        '''simple docstring'''
        from elasticsearch import Elasticsearch
        dset : Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30 )
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores , examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest( TestCase ):
    def test_flat_ip( self : str ) -> int:
        '''simple docstring'''
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores , total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
    def test_factory( self : int ) -> int:
        '''simple docstring'''
        import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
    def test_custom( self : str ) -> Dict:
        '''simple docstring'''
        import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization( self : List[Any] ) -> int:
        '''simple docstring'''
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores , indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs : str ) -> Optional[int]:
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 ,dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f'''mock://{index_name}'''
    index.save(path ,storage_options=mockfs.storage_options )
    index = FaissIndex.load(path ,storage_options=mockfs.storage_options )
    query = np.zeros(5 ,dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest( TestCase ):
    def test_elasticsearch( self : List[str] ) -> List[str]:
        '''simple docstring'''
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores , indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores , total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
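# A minimal standalone sketch of the FaissIndex API exercised above (everything here
# restates the test logic: with an inner-product index over the 5 basis vectors,
# vector 1 is the best match for a query that is 1 at position 1):
def _demo_faiss_index():  # hypothetical helper, not in the original test file
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    return scores[0] , indices[0]  # expected: (1.0, 1)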
| 0 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class PixaStructImageProcessingTester( unittest.TestCase ):
    def __init__( self: Dict , parent: List[str] , batch_size: List[str]=7 , num_channels: str=3 , image_size: List[str]=18 , min_resolution: Any=30 , max_resolution: Tuple=400 , size: Optional[int]=None , do_normalize: Any=True , do_convert_rgb: Optional[int]=True , patch_size: Optional[int]=None , ) -> Optional[Any]:
        """simple docstring"""
        size = size if size is not None else {"height": 20, "width": 20}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.size = size
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = [512, 1_024, 2_048, 4_096]
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
    def prepare_image_processor_dict( self: List[str] ) -> Optional[int]:
        """simple docstring"""
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
    def prepare_dummy_image( self: Any ) -> Dict:
        """simple docstring"""
        img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
        raw_image = Image.open(requests.get(img_url , stream=True ).raw ).convert('''RGB''' )
        return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self: List[Any] ) -> Tuple:
        """simple docstring"""
        self.image_processor_tester = PixaStructImageProcessingTester(self )
    @property
    def image_processor_dict( self: Tuple ) -> Any:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self: str ) -> Optional[int]:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
    def test_expected_patches( self: Dict ) -> Optional[int]:
        """simple docstring"""
        dummy_image = self.image_processor_tester.prepare_dummy_image()
        image_processor = self.image_processing_class(**self.image_processor_dict )
        max_patch = 2_048
        inputs = image_processor(dummy_image , return_tensors='''pt''' , max_patches=max_patch )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1E-3 , rtol=1E-3 ) )
    def test_call_pil( self: Dict ) -> Any:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_vqa( self: Union[str, Any] ) -> str:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        image_processor.is_vqa = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(ValueError ):
                encoded_images = image_processor(
                    image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            dummy_text = "Hello"
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch , header_text=dummy_text ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_numpy( self: Optional[Any] ) -> List[Any]:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def test_call_pytorch( self: int ) -> Union[str, Any]:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class PixaStructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PixaStructImageProcessor if is_vision_available() else None
    def setUp( self: Optional[int] ) -> List[Any]:
        """simple docstring"""
        self.image_processor_tester = PixaStructImageProcessingTester(self , num_channels=4 )
        self.expected_encoded_image_num_channels = 3
    @property
    def image_processor_dict( self: Any ) -> Dict:
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self: List[str] ) -> str:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processor , '''do_convert_rgb''' ) )
    def test_call_pil_four_channels( self: List[Any] ) -> int:
        """simple docstring"""
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        expected_hidden_dim = (
            (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            encoded_images = image_processor(
                image_inputs[0] , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            encoded_images = image_processor(
                image_inputs , return_tensors='''pt''' , max_patches=max_patch ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
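# For reference, the expected_hidden_dim used throughout these tests: every flattened
# patch carries patch_height * patch_width * num_channels pixel values plus 2 extra
# slots for the patch's row and column index. A quick sanity check with the tester
# defaults (16x16 patches, 3 channels):
def _demo_expected_hidden_dim():  # hypothetical helper, not in the original test file
    patch_height , patch_width , num_channels = 16 , 16 , 3
    return patch_height * patch_width * num_channels + 2  # 770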
| 110 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class MobileViTImageProcessor( BaseImageProcessor ):
    model_input_names = ['''pixel_values''']
    def __init__( self : List[str] , do_resize : bool = True , size : Dict[str, int] = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 2_55 , do_center_crop : bool = True , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = True , **kwargs : Tuple , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def resize( self : Optional[Any] , image : np.ndarray , size : Dict[str, int] , resample : PILImageResampling = PIL.Image.BILINEAR , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[Any] , ):
        '''simple docstring'''
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}' )
        output_size = get_resize_output_image_size(image , size=size["shortest_edge"] , default_to_square=False )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self : Optional[int] , image : np.ndarray , size : Dict[str, int] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Optional[Any] , ):
        '''simple docstring'''
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self : int , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs : Union[str, Any] , ):
        '''simple docstring'''
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def flip_channel_order( self : Optional[int] , image : np.ndarray , data_format : Optional[Union[str, ChannelDimension]] = None ):
        '''simple docstring'''
        return flip_channel_order(image , data_format=data_format )
    def preprocess( self : Optional[Any] , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_center_crop : bool = None , crop_size : Dict[str, int] = None , do_flip_channel_order : bool = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Dict , ):
        '''simple docstring'''
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image , size=crop_size ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self : Dict , outputs : List[Any] , target_sizes : List[Tuple] = None ):
        '''simple docstring'''
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits" )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
| 189 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no , data_set="train" ) -> int:
    '''simple docstring'''
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value(data_input_tuple ) -> Tuple:
    '''simple docstring'''
    hyp_val = 0
    for i in range(len(parameter_vector ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no , data_set ) -> Dict:
    '''simple docstring'''
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no , data_set ) -> Optional[int]:
    '''simple docstring'''
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative(index , end=m ) -> Optional[int]:
    '''simple docstring'''
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index ) -> Union[str, Any]:
    '''simple docstring'''
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent() -> str:
    '''simple docstring'''
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.0_0_0_0_0_2
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j) )
def test_gradient_descent() -> int:
    '''simple docstring'''
    for i in range(len(test_data ) ):
        print(("Actual output value:", output(i , "test" )) )
        print(("Hypothesis output:", calculate_hypothesis_value(i , "test" )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
    test_gradient_descent()
| 370 |
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
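# The matrix above is a capacity matrix: test_graph[u][v] is the capacity of the
# directed edge u -> v in a 6-node flow network, with node 0 as the source and
# node 5 as the sink (see the call at the bottom of this file).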
def bfs(graph , s , t , parent ) -> Tuple:
    '''simple docstring'''
    visited = [False] * len(graph )
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0 )
        for ind in range(len(graph[u] ) ):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind )
                visited[ind] = True
                parent[ind] = u
    return visited[t]
def mincut(graph , source , sink ) -> Optional[Any]:
    '''simple docstring'''
    parent = [-1] * (len(graph ))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph , source , sink , parent ):
        path_flow = float("Inf" )
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow , graph[parent[s]][s] )
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph ) ):
        for j in range(len(graph[0] ) ):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j) )
    return res
if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
| 340 | 0 |
"""simple docstring"""
# Imports
import numpy as np
class IndexCalculation:
    """simple docstring"""
    def __init__( self : int , red : List[Any]=None , green : Any=None , blue : Optional[Any]=None , red_edge : Optional[int]=None , nir : Optional[Any]=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self : Union[str, Any] , red : Tuple=None , green : List[str]=None , blue : Dict=None , red_edge : Any=None , nir : List[str]=None ):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self : str , index : int="" , red : str=None , green : Any=None , blue : Any=None , red_edge : List[str]=None , nir : Tuple=None ):
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
        funcs = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!' )
return False
    def arvaa( self : List[Any] ):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self : Optional[Any] ):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self : List[str] ):
        return self.nir * (self.red / (self.green**2))
    def gli( self : Dict ):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self : int ):
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self : List[str] ):
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self : List[Any] ):
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self : Union[str, Any] ):
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self : str ):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi( self : Any ):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self : List[Any] ):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self : Optional[int] ):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self : int , a : str=0.08 , b : List[str]=1.22 , x : Optional[Any]=0.03 ):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self : Dict ):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self : str ):
        return (self.nir / self.green) - 1
    def ci_rededge( self : Optional[int] ):
        return (self.nir / self.redEdge) - 1
    def ci( self : Any ):
        return (self.red - self.blue) / self.red
    def ctvi( self : List[Any] ):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self : List[Any] ):
        return self.nir - self.green
    def evi( self : int ):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self : Optional[Any] ):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
    def gosavi( self : str , y : Optional[int]=0.16 ):
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self : Optional[Any] , n : str=0.5 ):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self : List[Any] ):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self : Any , a : Any=None , b : Optional[Any]=None ):
        return (self.nir - b) / (a * self.red)
    def ipvi( self : List[str] ):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self : str ):
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self : List[Any] ):
        return self.nir / self.red
    def mrvi( self : Optional[Any] ):
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self : Any ):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self : Tuple ):
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self : List[Any] ):
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self : int ):
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self : Tuple ):
        return (self.green - self.red) / (self.green + self.red)
    def ri( self : Union[str, Any] ):
        return (self.red - self.green) / (self.red + self.green)
    def s( self : int ):
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self : Optional[int] ):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self : int ):
        return self.nir / self.red
    def tvi( self : Optional[int] ):
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self : List[str] ):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
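# A hedged usage sketch (not part of the original module): computing NDVI from two
# illustrative reflectance bands. Only the class and index names come from the code
# above; the array values are made up.
def _demo_ndvi():  # hypothetical helper
    red = np.array([[0.1, 0.2], [0.3, 0.4]] )
    nir = np.array([[0.5, 0.6], [0.7, 0.8]] )
    return IndexCalculation(red=red , nir=nir ).calculation('NDVI' )  # elementwise (nir - red) / (nir + red)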
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
| 224 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""huggingface/informer-tourism-monthly""": (
"""https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"""
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = """informer"""
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """encoder_attention_heads""",
        """num_hidden_layers""": """encoder_layers""",
    }
    def __init__( self : Any , prediction_length : Optional[int] = None , context_length : Optional[int] = None , distribution_output : str = "student_t" , loss : str = "nll" , input_size : int = 1 , lags_sequence : List[int] = None , scaling : Optional[Union[str, bool]] = "mean" , num_dynamic_real_features : int = 0 , num_static_categorical_features : int = 0 , num_static_real_features : int = 0 , num_time_features : int = 0 , cardinality : Optional[List[int]] = None , embedding_dimension : Optional[List[int]] = None , d_model : int = 6_4 , encoder_ffn_dim : int = 3_2 , decoder_ffn_dim : int = 3_2 , encoder_attention_heads : int = 2 , decoder_attention_heads : int = 2 , encoder_layers : int = 2 , decoder_layers : int = 2 , is_encoder_decoder : bool = True , activation_function : str = "gelu" , dropout : float = 0.05 , encoder_layerdrop : float = 0.1 , decoder_layerdrop : float = 0.1 , attention_dropout : float = 0.1 , activation_dropout : float = 0.1 , num_parallel_samples : int = 1_0_0 , init_std : float = 0.02 , use_cache : bool = True , attention_type : str = "prob" , sampling_factor : int = 5 , distil : bool = True , **kwargs : int , ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence ) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
    @property
    def _number_of_features( self : Any ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
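# A hedged usage sketch (not part of the original module): instantiating the config
# above with illustrative values and reading back two of its defaults.
def _demo_informer_config():  # hypothetical helper
    config = InformerConfig(prediction_length=24 , context_length=48 , num_time_features=1 )
    return config.d_model , config.attention_type  # (64, 'prob')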
| 224 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = 'maskformer-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self : Optional[Any] , image_size : Optional[Any]=2_24 , patch_size : Any=4 , num_channels : Tuple=3 , embed_dim : Optional[int]=96 , depths : Any=[2, 2, 6, 2] , num_heads : Tuple=[3, 6, 12, 24] , window_size : str=7 , mlp_ratio : str=4.0 , qkv_bias : List[Any]=True , hidden_dropout_prob : Union[str, Any]=0.0 , attention_probs_dropout_prob : Any=0.0 , drop_path_rate : Optional[int]=0.1 , hidden_act : List[Any]="gelu" , use_absolute_embeddings : int=False , initializer_range : str=0.02 , layer_norm_eps : Tuple=1e-5 , out_features : Optional[int]=None , out_indices : List[str]=None , **kwargs : Optional[Any] , ):
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.stage_names = ["stem"] + [f'stage{idx}' for idx in range(1 , len(depths ) + 1 )]
        self._out_features , self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 346 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
    warnings = None
try:
import msvcrt
except ImportError:
    msvcrt = None
try:
import fcntl
except ImportError:
    fcntl = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger() -> Optional[int]:
    global _logger
    _logger = _logger or logging.getLogger(__name__ )
    return _logger
class Timeout(TimeoutError ):
    """simple docstring"""
    def __init__( self : Dict , lock_file : Optional[Any] ):
        self.lock_file = lock_file
        return None
    def __str__( self : Any ):
        temp = f'The file lock \'{self.lock_file}\' could not be acquired.'
        return temp
class _Acquire_ReturnProxy:
    """simple docstring"""
    def __init__( self : List[Any] , lock : Optional[int] ):
        self.lock = lock
        return None
    def __enter__( self : str ):
        return self.lock
    def __exit__( self : List[Any] , exc_type : List[Any] , exc_value : Union[str, Any] , traceback : Dict ):
        self.lock.release()
        return None
class BaseFileLock:
    """simple docstring"""
    def __init__( self : Union[str, Any] , lock_file : Union[str, Any] , timeout : Optional[int]=-1 , max_filename_length : Tuple=None ):
        max_filename_length = max_filename_length if max_filename_length is not None else 2_55
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file , max_filename_length )
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self._timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None
    @property
    def lock_file( self : Optional[Any] ):
        return self._lock_file
    @property
    def timeout( self : List[Any] ):
        return self._timeout
    @timeout.setter
    def timeout( self : int , timeout : List[Any] ):
        self._timeout = float(timeout )
        return None
    def _acquire( self : Dict ):
        raise NotImplementedError()
    def _release( self : str ):
        raise NotImplementedError()
    @property
    def is_locked( self : Optional[Any] ):
        return self._lock_file_fd is not None
def UpperCamelCase__ ( self : int , __a : int=None , __a : Tuple=0.05 ):
# Use the default timeout, if no timeout is provided.
if timeout is None:
_a = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
_a = id(self )
_a = self._lock_file
_a = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__a )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
raise
return _Acquire_ReturnProxy(lock=self )
    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f'Lock {lock_id} released on {lock_filename}')
        return None
def __enter__( self : List[Any] ):
self.acquire()
return self
def __exit__( self : str , __a : str , __a : Dict , __a : Dict ):
self.release()
return None
    def __del__(self):
        self.release(force=True)
return None
    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path
class WindowsFileLock(BaseFileLock):
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None
    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UnixFileLock(BaseFileLock):
    """simple docstring"""
    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """simple docstring"""
    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None
    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
FileLock = None
if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
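# Usage sketch (editor's addition, hedged): a minimal example of the lock API
# restored above, following standard py-filelock semantics; "./app.lock" is a
# hypothetical path chosen for illustration only.
if __name__ == "__main__":
    lock = FileLock("./app.lock", timeout=1)
    with lock:  # acquire() is called on __enter__, release() on __exit__
        print("holding the lock; is_locked =", lock.is_locked)
    print("released; is_locked =", lock.is_locked)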
| 346 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("""Depth cannot be less than 0""")
    if len(scores) == 0:
        raise ValueError("""Scores cannot be empty""")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34_423]
    height = math.log(len(scores), 2)
    print("""Optimal value : """, end="""""")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
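# Worked check (editor's addition): with the eight leaf scores above and
# height = 3, the tree evaluates to max(min(90, 33), min(65, 34_423)) =
# max(33, 65) = 65, so main() prints "Optimal value : 65".
if __name__ == "__main__":
    assert minimax(0, 0, True, [90, 23, 6, 33, 21, 65, 123, 34_423], 3) == 65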
| 20 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
lowercase : str = """\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
lowercase : Dict = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
lowercase : int = """
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
'''simple docstring'''
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ),
} ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" ,)
    def _compute(self, predictions, references):
'''simple docstring'''
if self.config_name == "cola":
return {"matthews_correlation": matthews_corrcoef(snake_case ,snake_case )}
elif self.config_name == "stsb":
return pearson_and_spearman(snake_case ,snake_case )
elif self.config_name in ["mrpc", "qqp"]:
return acc_and_fa(snake_case ,snake_case )
elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
return {"accuracy": simple_accuracy(snake_case ,snake_case )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """
"""\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
| 20 | 1 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"""0_{i}"""
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(1_00).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 1_00
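# Sketch (editor's addition): the shard-count rule these tests exercise,
# restated without Spark. Assuming each int64 row costs 8 bytes, the builder
# targets ceil(total_bytes / max_shard_size) partitions, capped at the row
# count, matching the 50- and 100-partition expectations above.
import math
def expected_partitions(num_rows: int, row_size: int, max_shard_size: int) -> int:
    return min(num_rows, math.ceil(num_rows * row_size / max_shard_size))
assert expected_partitions(1_00, 8, 16) == 50
assert expected_partitions(1_00, 8, 1) == 1_00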
| 350 |
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    tokenizer_class = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        '''simple docstring'''
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")
        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
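# Usage sketch (editor's addition): combining the branches above with a public
# checkpoint (assumed to ship this processor: "CIDAS/clipseg-rd64-refined");
# running it requires network access, and "cats.png" is a hypothetical file.
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPSegProcessor
    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    image = Image.open("cats.png")
    inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
    print(sorted(inputs.keys()))  # expected: attention_mask, input_ids, pixel_values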
| 230 | 0 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
a__ : Dict = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-model-flax" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" )
except HTTPError:
pass
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
__SCREAMING_SNAKE_CASE = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
__SCREAMING_SNAKE_CASE = FlaxBertModel(UpperCAmelCase__ )
model.push_to_hub("test-model-flax" , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="test-model-flax" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(UpperCAmelCase__ , repo_id="test-model-flax" , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(F"""{USER}/test-model-flax""" )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 , msg=F"""{key} not identical""" )
def UpperCAmelCase_ ( self : int ) -> Any:
__SCREAMING_SNAKE_CASE = BertConfig(
vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 )
__SCREAMING_SNAKE_CASE = FlaxBertModel(UpperCAmelCase__ )
model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 , msg=F"""{key} not identical""" )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
UpperCAmelCase__ , repo_id="valid_org/test-model-flax-org" , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(model.params ) )
__SCREAMING_SNAKE_CASE = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
__SCREAMING_SNAKE_CASE = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(UpperCAmelCase__ , 1E-3 , msg=F"""{key} not identical""" )
def check_models_equal(model_1, model_2):
    '''simple docstring'''
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1E-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class UpperCamelCase_ ( unittest.TestCase):
"""simple docstring"""
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
__SCREAMING_SNAKE_CASE = FlaxBertModel(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) )
with self.assertRaises(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.assertTrue(check_models_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
def UpperCAmelCase_ ( self : int ) -> List[Any]:
__SCREAMING_SNAKE_CASE = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
__SCREAMING_SNAKE_CASE = FlaxBertModel(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = "bert"
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) , max_shard_size="10KB" )
with self.assertRaises(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.assertTrue(check_models_equal(UpperCAmelCase__ , UpperCAmelCase__ ) )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
__SCREAMING_SNAKE_CASE = "bert"
__SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-bert-subfolder"
with self.assertRaises(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Tuple ) -> int:
__SCREAMING_SNAKE_CASE = "bert"
__SCREAMING_SNAKE_CASE = "hf-internal-testing/tiny-random-bert-sharded-subfolder"
with self.assertRaises(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = FlaxBertModel.from_pretrained(UpperCAmelCase__ , subfolder=UpperCAmelCase__ )
self.assertIsNotNone(UpperCAmelCase__ )
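# Standalone sketch (editor's addition): the flattened-parameter comparison the
# tests above rely on, demonstrated on plain nested dicts instead of models.
if __name__ == "__main__":
    import numpy as np
    from flax.traverse_util import flatten_dict
    params_a = {"layer": {"kernel": np.zeros((2, 2))}}
    params_b = {"layer": {"kernel": np.zeros((2, 2)) + 1e-5}}
    flat_a, flat_b = flatten_dict(params_a), flatten_dict(params_b)
    # total absolute difference per leaf is 4e-5, within the 1e-4 tolerance
    print("match:", all(np.sum(np.abs(flat_a[k] - flat_b[k])) <= 1e-4 for k in flat_a))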
| 54 |
"""simple docstring"""
def solution(limit=100_0000):
    '''simple docstring'''
    primes = set(range(3, limit + 1, 2))
    primes.add(2)
    for p in range(3, limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit + 1, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(F"{solution() = }")
| 54 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Dict:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Union[str, Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[str]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> int:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> str:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Tuple:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> List[Any]:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Any:
requires_backends(self , ['''sentencepiece'''] )
class __A ( metaclass=_SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["sentencepiece"]
def __init__( self , *__A , **__A ) -> Optional[int]:
        requires_backends(self , ['''sentencepiece'''] )
| 215 |
"""simple docstring"""
import os
import sys
import unittest
lowerCamelCase_ : Tuple = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("""tests""", """models""", """bert""", """test_modeling_bert.py""")
BLIP_TEST_FILE = os.path.join("""tests""", """models""", """blip""", """test_modeling_blip.py""")
class GetTestInfoTester(unittest.TestCase):
"""simple docstring"""
    def test_test_to_tester_mapping(self):
        bert_map = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_map = get_test_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_map = {'''BertModelTest''': '''BertModelTester'''}
        expected_blip_map = {
'''BlipModelTest''': '''BlipModelTester''',
'''BlipTextImageModelTest''': '''BlipTextImageModelsModelTester''',
'''BlipTextModelTest''': '''BlipTextModelTester''',
'''BlipTextRetrievalModelTest''': '''BlipTextRetrievalModelTester''',
'''BlipVQAModelTest''': '''BlipVQAModelTester''',
'''BlipVisionModelTest''': '''BlipVisionModelTester''',
}
        self.assertEqual(get_test_info.to_json(bert_map), expected_bert_map)
        self.assertEqual(get_test_info.to_json(blip_map), expected_blip_map)
    def test_model_to_test_mapping(self):
        bert_map = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_map = get_model_to_test_mapping(BLIP_TEST_FILE)
        expected_bert_map = {
'''BertForMaskedLM''': ['''BertModelTest'''],
'''BertForMultipleChoice''': ['''BertModelTest'''],
'''BertForNextSentencePrediction''': ['''BertModelTest'''],
'''BertForPreTraining''': ['''BertModelTest'''],
'''BertForQuestionAnswering''': ['''BertModelTest'''],
'''BertForSequenceClassification''': ['''BertModelTest'''],
'''BertForTokenClassification''': ['''BertModelTest'''],
'''BertLMHeadModel''': ['''BertModelTest'''],
'''BertModel''': ['''BertModelTest'''],
}
        expected_blip_map = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelTest'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTest'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTest'''],
'''BlipModel''': ['''BlipModelTest'''],
'''BlipTextModel''': ['''BlipTextModelTest'''],
'''BlipVisionModel''': ['''BlipVisionModelTest'''],
}
        self.assertEqual(get_test_info.to_json(bert_map), expected_bert_map)
        self.assertEqual(get_test_info.to_json(blip_map), expected_blip_map)
    def test_model_to_tester_mapping(self):
        bert_map = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_map = get_model_to_tester_mapping(BLIP_TEST_FILE)
        expected_bert_map = {
'''BertForMaskedLM''': ['''BertModelTester'''],
'''BertForMultipleChoice''': ['''BertModelTester'''],
'''BertForNextSentencePrediction''': ['''BertModelTester'''],
'''BertForPreTraining''': ['''BertModelTester'''],
'''BertForQuestionAnswering''': ['''BertModelTester'''],
'''BertForSequenceClassification''': ['''BertModelTester'''],
'''BertForTokenClassification''': ['''BertModelTester'''],
'''BertLMHeadModel''': ['''BertModelTester'''],
'''BertModel''': ['''BertModelTester'''],
}
        expected_blip_map = {
'''BlipForConditionalGeneration''': ['''BlipTextImageModelsModelTester'''],
'''BlipForImageTextRetrieval''': ['''BlipTextRetrievalModelTester'''],
'''BlipForQuestionAnswering''': ['''BlipVQAModelTester'''],
'''BlipModel''': ['''BlipModelTester'''],
'''BlipTextModel''': ['''BlipTextModelTester'''],
'''BlipVisionModel''': ['''BlipVisionModelTester'''],
}
        self.assertEqual(get_test_info.to_json(bert_map), expected_bert_map)
        self.assertEqual(get_test_info.to_json(blip_map), expected_blip_map)
| 215 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    '''simple docstring'''
    mode = '''summarization'''
    loss_names = ['''loss''']
    metric_names = ROUGE_KEYS
    default_val_metric = '''rouge2'''
def __init__( self : List[Any] , UpperCAmelCase__ : int , **UpperCAmelCase__ : Any) ->Tuple:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
A__ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError('''Dynamic Batch size does not work for multi-gpu training''')
if hparams.sortish_sampler:
raise ValueError('''--sortish_sampler and --max_tokens_per_batch may not be used simultaneously''')
super().__init__(UpperCAmelCase__ , num_labels=UpperCAmelCase__ , mode=self.mode , **UpperCAmelCase__)
use_task_specific_params(self.model , '''summarization''')
save_git_info(self.hparams.output_dir)
A__ = Path(self.output_dir) / '''metrics.json'''
A__ = Path(self.output_dir) / '''hparams.pkl'''
pickle_save(self.hparams , self.hparams_save_path)
A__ = 0
A__ = defaultdict(UpperCAmelCase__)
A__ = self.config.model_type
A__ = self.config.tgt_vocab_size if self.model_type == '''fsmt''' else self.config.vocab_size
A__ = {
'''data_dir''': self.hparams.data_dir,
'''max_source_length''': self.hparams.max_source_length,
'''prefix''': self.model.config.prefix or '''''',
}
A__ = {
'''train''': self.hparams.n_train,
'''val''': self.hparams.n_val,
'''test''': self.hparams.n_test,
}
A__ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A__ = {
'''train''': self.hparams.max_target_length,
'''val''': self.hparams.val_max_target_length,
'''test''': self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model)
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder())
assert_all_frozen(self.model.get_encoder())
A__ = get_git_info()['''repo_sha''']
A__ = hparams.num_workers
A__ = None # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , MBartTokenizer):
A__ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A__ = self.decoder_start_token_id
A__ = (
SeqaSeqDataset if hasattr(self.tokenizer , '''prepare_seq2seq_batch''') else LegacySeqaSeqDataset
)
A__ = False
A__ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A__ = self.hparams.eval_max_gen_length
else:
A__ = self.model.config.max_length
A__ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Dict[str, torch.Tensor]) ->Dict[str, List[str]]:
'''simple docstring'''
A__ = {
k: self.tokenizer.batch_decode(v.tolist()) if '''mask''' not in k else v.shape for k, v in batch.items()
}
save_json(UpperCAmelCase__ , Path(self.output_dir) / '''text_batch.json''')
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir) / '''tok_batch.json''')
A__ = True
return readable_batch
def SCREAMING_SNAKE_CASE ( self : List[Any] , UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[Any]) ->Optional[int]:
'''simple docstring'''
return self.model(UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : int , UpperCAmelCase__ : List[int]) ->Any:
'''simple docstring'''
A__ = self.tokenizer.batch_decode(
UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__)
return lmap(str.strip , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : dict) ->Tuple:
'''simple docstring'''
A__ = self.tokenizer.pad_token_id
A__ , A__ = batch['''input_ids'''], batch['''attention_mask''']
A__ = batch['''labels''']
        if isinstance(self.model , T5ForConditionalGeneration):
A__ = self.model._shift_right(UpperCAmelCase__)
else:
A__ = shift_tokens_right(UpperCAmelCase__ , UpperCAmelCase__)
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A__ = decoder_input_ids
self.save_readable_batch(UpperCAmelCase__)
A__ = self(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , decoder_input_ids=UpperCAmelCase__ , use_cache=UpperCAmelCase__)
A__ = outputs['''logits''']
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A__ = nn.CrossEntropyLoss(ignore_index=UpperCAmelCase__)
assert lm_logits.shape[-1] == self.vocab_size
A__ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1]) , tgt_ids.view(-1))
else:
A__ = nn.functional.log_softmax(UpperCAmelCase__ , dim=-1)
A__ , A__ = label_smoothed_nll_loss(
UpperCAmelCase__ , UpperCAmelCase__ , self.hparams.label_smoothing , ignore_index=UpperCAmelCase__)
return (loss,)
@property
def SCREAMING_SNAKE_CASE ( self : int) ->int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Tuple) ->Dict:
'''simple docstring'''
A__ = self._step(UpperCAmelCase__)
A__ = dict(zip(self.loss_names , UpperCAmelCase__))
# tokens per batch
A__ = batch['''input_ids'''].ne(self.pad).sum() + batch['''labels'''].ne(self.pad).sum()
A__ = batch['''input_ids'''].shape[0]
A__ = batch['''input_ids'''].eq(self.pad).sum()
A__ = batch['''input_ids'''].eq(self.pad).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str]) ->Dict:
'''simple docstring'''
return self._generative_step(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int="val") ->Dict:
'''simple docstring'''
self.step_count += 1
A__ = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
A__ = losses['''loss''']
A__ = {
k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ['''gen_time''', '''gen_len''']
}
A__ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A__ = torch.tensor(UpperCAmelCase__).type_as(UpperCAmelCase__)
generative_metrics.update({k: v.item() for k, v in losses.items()})
losses.update(UpperCAmelCase__)
A__ = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
A__ = self.step_count
self.metrics[prefix].append(UpperCAmelCase__) # callback writes this to self.metrics_save_path
A__ = flatten_list([x['''preds'''] for x in outputs])
return {
"log": all_metrics,
"preds": preds,
f"""{prefix}_loss""": loss,
f"""{prefix}_{self.val_metric}""": metric_tensor,
}
def SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int) ->Dict:
'''simple docstring'''
return calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : dict) ->dict:
'''simple docstring'''
A__ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A__ = self.model.generate(
batch['''input_ids'''] , attention_mask=batch['''attention_mask'''] , use_cache=UpperCAmelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A__ = (time.time() - ta) / batch['''input_ids'''].shape[0]
A__ = self.ids_to_clean_text(UpperCAmelCase__)
A__ = self.ids_to_clean_text(batch['''labels'''])
A__ = self._step(UpperCAmelCase__)
A__ = dict(zip(self.loss_names , UpperCAmelCase__))
A__ = self.calc_generative_metrics(UpperCAmelCase__ , UpperCAmelCase__)
A__ = np.mean(lmap(UpperCAmelCase__ , UpperCAmelCase__))
base_metrics.update(gen_time=UpperCAmelCase__ , gen_len=UpperCAmelCase__ , preds=UpperCAmelCase__ , target=UpperCAmelCase__ , **UpperCAmelCase__)
return base_metrics
def SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int]) ->Optional[int]:
'''simple docstring'''
return self._generative_step(UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase__ : Tuple) ->Any:
'''simple docstring'''
return self.validation_epoch_end(UpperCAmelCase__ , prefix='''test''')
def SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase__ : Optional[Any]) ->SeqaSeqDataset:
'''simple docstring'''
A__ = self.n_obs[type_path]
A__ = self.target_lens[type_path]
A__ = self.dataset_class(
self.tokenizer , type_path=UpperCAmelCase__ , n_obs=UpperCAmelCase__ , max_target_length=UpperCAmelCase__ , **self.dataset_kwargs , )
return dataset
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False) ->DataLoader:
'''simple docstring'''
A__ = self.get_dataset(UpperCAmelCase__)
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A__ = dataset.make_sortish_sampler(UpperCAmelCase__ , distributed=self.hparams.gpus > 1)
return DataLoader(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase__ , num_workers=self.num_workers , sampler=UpperCAmelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A__ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1)
return DataLoader(
UpperCAmelCase__ , batch_sampler=UpperCAmelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCAmelCase__ , batch_size=UpperCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCAmelCase__ , num_workers=self.num_workers , sampler=UpperCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self : Any) ->DataLoader:
'''simple docstring'''
A__ = self.get_dataloader('''train''' , batch_size=self.hparams.train_batch_size , shuffle=UpperCAmelCase__)
return dataloader
def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->DataLoader:
'''simple docstring'''
return self.get_dataloader('''val''' , batch_size=self.hparams.eval_batch_size)
def SCREAMING_SNAKE_CASE ( self : List[str]) ->DataLoader:
'''simple docstring'''
return self.get_dataloader('''test''' , batch_size=self.hparams.eval_batch_size)
@staticmethod
def SCREAMING_SNAKE_CASE ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any]) ->List[Any]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCAmelCase__ , UpperCAmelCase__)
add_generic_args(UpperCAmelCase__ , UpperCAmelCase__)
parser.add_argument(
'''--max_source_length''' , default=1_024 , type=UpperCAmelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--max_target_length''' , default=56 , type=UpperCAmelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--val_max_target_length''' , default=142 , type=UpperCAmelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--test_max_target_length''' , default=142 , type=UpperCAmelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument('''--freeze_encoder''' , action='''store_true''')
parser.add_argument('''--freeze_embeds''' , action='''store_true''')
parser.add_argument('''--sortish_sampler''' , action='''store_true''' , default=UpperCAmelCase__)
parser.add_argument('''--overwrite_output_dir''' , action='''store_true''' , default=UpperCAmelCase__)
parser.add_argument('''--max_tokens_per_batch''' , type=UpperCAmelCase__ , default=UpperCAmelCase__)
parser.add_argument('''--logger_name''' , type=UpperCAmelCase__ , choices=['''default''', '''wandb''', '''wandb_shared'''] , default='''default''')
parser.add_argument('''--n_train''' , type=UpperCAmelCase__ , default=-1 , required=UpperCAmelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_val''' , type=UpperCAmelCase__ , default=500 , required=UpperCAmelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument('''--n_test''' , type=UpperCAmelCase__ , default=-1 , required=UpperCAmelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument(
'''--task''' , type=UpperCAmelCase__ , default='''summarization''' , required=UpperCAmelCase__ , help='''# examples. -1 means use all.''')
parser.add_argument('''--label_smoothing''' , type=UpperCAmelCase__ , default=0.0 , required=UpperCAmelCase__)
parser.add_argument('''--src_lang''' , type=UpperCAmelCase__ , default='''''' , required=UpperCAmelCase__)
parser.add_argument('''--tgt_lang''' , type=UpperCAmelCase__ , default='''''' , required=UpperCAmelCase__)
parser.add_argument('''--eval_beams''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , required=UpperCAmelCase__)
parser.add_argument(
'''--val_metric''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , required=UpperCAmelCase__ , choices=['''bleu''', '''rouge2''', '''loss''', None])
parser.add_argument('''--eval_max_gen_length''' , type=UpperCAmelCase__ , default=UpperCAmelCase__ , help='''never generate more than n tokens''')
parser.add_argument('''--save_top_k''' , type=UpperCAmelCase__ , default=1 , required=UpperCAmelCase__ , help='''How many checkpoints to save''')
parser.add_argument(
'''--early_stopping_patience''' , type=UpperCAmelCase__ , default=-1 , required=UpperCAmelCase__ , help=(
'''-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So'''
''' val_check_interval will effect it.'''
) , )
return parser
class TranslationModule(SummarizationModule):
    '''simple docstring'''
    mode = '''translation'''
    loss_names = ['''loss''']
    metric_names = ['''bleu''']
    default_val_metric = '''bleu'''
def __init__( self : List[str] , UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : List[Any]) ->str:
'''simple docstring'''
super().__init__(UpperCAmelCase__ , **UpperCAmelCase__)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple) ->dict:
'''simple docstring'''
return calculate_bleu(UpperCAmelCase__ , UpperCAmelCase__)
def main(args, model=None):
    """simple docstring"""
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model = SummarizationModule(args)
        else:
            model = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith('''/tmp''')
        or str(args.output_dir).startswith('''/var''')
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger
        project = os.environ.get('''WANDB_PROJECT''', dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger
        logger = WandbLogger(name=model.output_dir.name, project=f"""hf_{dataset}""")
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False
    lower_is_better = args.val_metric == '''loss'''
    trainer = generic_train(
        model, args, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better), early_stopping_callback=es_callback, logger=logger, )
    pickle_save(model.hparams, model.output_dir / '''hparams.pkl''')
    if not args.do_predict:
        return model
    model.hparams.test_checkpoint = ''''''
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, '''*.ckpt'''), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
_lowerCamelCase : str = pl.Trainer.add_argparse_args(parser)
_lowerCamelCase : Any = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_lowerCamelCase : Any = parser.parse_args()
main(args)
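# Standalone sketch (editor's addition): a hedged reconstruction of the
# fairseq-style label-smoothed NLL loss that utils.label_smoothed_nll_loss is
# imported for above (an illustration, not the imported helper itself).
import torch
def smoothed_nll(lprobs, target, epsilon, ignore_index=-100):
    # lprobs: (N, vocab) log-probabilities; target: (N,) gold token ids
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
if __name__ == "__main__":
    lprobs = torch.log_softmax(torch.randn(4, 10), dim=-1)
    print(float(smoothed_nll(lprobs, torch.tensor([1, 2, 3, 4]), epsilon=0.1)))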
| 14 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCamelCase :
@staticmethod
def UpperCAmelCase_ ( *lowerCamelCase :Tuple , **lowerCamelCase :List[Any] ) -> Tuple:
pass
@is_pipeline_test
@require_vision
class _UpperCamelCase ( unittest.TestCase ):
@require_torch
def UpperCAmelCase_ ( self :int ) -> Optional[Any]:
UpperCAmelCase__ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , )
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ = image_classifier(lowerCamelCase , candidate_labels=["a", "b", "c"] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowerCamelCase ) , [
[{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "b"}, {"score": 0.3_33, "label": "c"}],
[{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "c"}, {"score": 0.3_33, "label": "b"}],
] , )
UpperCAmelCase__ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
] , )
@require_tf
def UpperCAmelCase_ ( self :List[str] ) -> Optional[int]:
UpperCAmelCase__ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification" , framework="tf" )
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ = image_classifier(lowerCamelCase , candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [{"score": 0.3_33, "label": "a"}, {"score": 0.3_33, "label": "b"}, {"score": 0.3_33, "label": "c"}] , )
UpperCAmelCase__ = image_classifier([image] * 5 , candidate_labels=["A", "B", "C"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
[
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
{"score": 0.3_33, "label": ANY(lowerCamelCase )},
],
] , )
@slow
@require_torch
def UpperCAmelCase_ ( self :str ) -> Dict:
UpperCAmelCase__ = pipeline(
task="zero-shot-image-classification" , model="openai/clip-vit-base-patch32" , )
# This is an image of 2 cats with remotes and no planes
UpperCAmelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCAmelCase__ = image_classifier(lowerCamelCase , candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
] , )
UpperCAmelCase__ = image_classifier([image] * 5 , candidate_labels=["cat", "plane", "remote"] , batch_size=2 )
self.assertEqual(
nested_simplify(lowerCamelCase ) , [
[
{"score": 0.5_11, "label": "remote"},
{"score": 0.4_85, "label": "cat"},
{"score": 0.0_04, "label": "plane"},
],
]
* 5 , )
    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
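
# A minimal usage sketch for the pipeline exercised above; it is an illustration, not part
# of the test suite. The model id comes from the slow tests, while the image path below is
# a hypothetical placeholder.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    print(classifier("two_cats.png", candidate_labels=["cat", "plane", "remote"]))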
| 169 | 0 |
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
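
# Usage sketch: the converter above is driven from the command line; the checkpoint and
# output paths in this example are hypothetical placeholders.
#
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /tmp/t5/model.ckpt \
#       --config_file /tmp/t5/config.json \
#       --pytorch_dump_path /tmp/t5-pytorch
#
# Programmatic use is equivalent:
#   convert_tf_checkpoint_to_pytorch("/tmp/t5/model.ckpt", "/tmp/t5/config.json", "/tmp/t5-pytorch")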
| 88 |
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class TFMBartModelTester:
    config_cls = MBartConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_mbart_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFMBartModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()
        past_key_values = past_key_values[1]
def prepare_mbart_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFMBartModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
    all_generative_model_classes = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFMBartForConditionalGeneration,
            "feature-extraction": TFMBartModel,
            "summarization": TFMBartForConditionalGeneration,
            "text2text-generation": TFMBartForConditionalGeneration,
            "translation": TFMBartForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name != "FeatureExtractionPipelineTests":
            # Exception encountered when calling layer '...'
            return True

        return False

    def setUp(self):
        self.model_tester = TFMBartModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MBartConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFMBartModelIntegrationTest(unittest.TestCase):
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
    ]
    expected_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
    ]
    model_name = "facebook/mbart-large-en-ro"

    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        return TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        self.assertListEqual(self.expected_text, generated_words)

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2
        )
        generated_words = self.tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation_en_ro(self):
        self._assert_generated_batch_equal_expected()
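
# A minimal standalone translation sketch mirroring the integration test above; the model id,
# input sentence, and decoding settings come from the test, the rest is an assumed harness.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
    model = TFAutoModelForSeq2SeqLM.from_pretrained("facebook/mbart-large-en-ro")
    batch = tokenizer([" UN Chief Says There Is No Military Solution in Syria"], return_tensors="tf")
    generated = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tokenizer.batch_decode(generated, skip_special_tokens=True))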
| 88 | 1 |