Dataset schema:

column                    type     values
code                      string   lengths 82 to 54.1k
code_codestyle            int64    0 to 699
style_context             string   lengths 111 to 35.6k
style_context_codestyle   int64    0 to 699
label                     int64    0 or 1
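Each record below is one row of this table: a `code` sample, its style id, a `style_context` sample, its style id, and a binary `label`. A minimal loading sketch using the Hugging Face `datasets` library follows; the repository id `user/code-style-pairs` is a hypothetical placeholder for wherever this table is actually hosted:

from datasets import load_dataset

# Hypothetical repo id; substitute the dataset's real location.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])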
def set_bit(number: int, position: int) -> int:
    # Set the bit at `position` of `number` to 1
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    # Set the bit at `position` of `number` to 0
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    # Invert the bit at `position` of `number`
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    # Return True if the bit at `position` of `number` is 1
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    # Return the bit (0 or 1) at `position` of `number`
    return int((number & (1 << position)) != 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
101
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
0
"""simple docstring""" import os # Precomputes a list of the 100 first triangular numbers __magic_name__ : Tuple = [int(0.5 * n * (n + 1)) for n in range(1, 1_0_1)] def UpperCamelCase (): UpperCamelCase : List[Any] = os.path.dirname(os.path.realpath(SCREAMING_SNAKE_CASE ) ) UpperCamelCase : str = os.path.join(SCREAMING_SNAKE_CASE , """words.txt""" ) UpperCamelCase : Optional[Any] = """""" with open(SCREAMING_SNAKE_CASE ) as f: UpperCamelCase : Dict = f.readline() UpperCamelCase : int = [word.strip("""\"""" ) for word in words.strip("""\r\n""" ).split(""",""" )] UpperCamelCase : Union[str, Any] = [ word for word in [sum(ord(SCREAMING_SNAKE_CASE ) - 64 for x in word ) for word in words] if word in TRIANGULAR_NUMBERS ] return len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(solution())
102
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = 
tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = 
self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
0
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class UpperCAmelCase : def __init__( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : str=None , __lowerCamelCase : str=None , __lowerCamelCase : Union[str, Any]="resnet50" , __lowerCamelCase : Any=3 , __lowerCamelCase : List[str]=3_2 , __lowerCamelCase : str=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : int=True , ): """simple docstring""" _snake_case = parent _snake_case = out_indices if out_indices is not None else [4] _snake_case = stage_names _snake_case = out_features _snake_case = backbone _snake_case = batch_size _snake_case = image_size _snake_case = num_channels _snake_case = use_pretrained_backbone _snake_case = is_training def __UpperCAmelCase ( self : Any ): """simple docstring""" _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = self.get_config() return config, pixel_values def __UpperCAmelCase ( self : List[str] ): """simple docstring""" return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __UpperCAmelCase ( self : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any ): """simple docstring""" _snake_case = TimmBackbone(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() with torch.no_grad(): _snake_case = model(__lowerCamelCase ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , ) def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case = config_and_inputs _snake_case = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch @require_timm class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ): A__ : Optional[int] = (TimmBackbone,) if is_torch_available() else () A__ : Dict = {'''feature-extraction''': TimmBackbone} if is_torch_available() else {} A__ : Optional[Any] = False A__ : List[str] = False A__ : Any = False A__ : Optional[Any] = False def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case = TimmBackboneModelTester(self ) _snake_case = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def 
__UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" _snake_case = '''resnet18''' _snake_case = '''microsoft/resnet-18''' _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase , use_timm_backbone=__lowerCamelCase ) _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase , use_timm_backbone=__lowerCamelCase , out_indices=[1, 2, 3] ) _snake_case = AutoBackbone.from_pretrained(__lowerCamelCase , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' ) def __UpperCAmelCase ( self : Any ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" pass @unittest.skip('''TimmBackbone initialization is managed on the timm side''' ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" pass @unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' ) def __UpperCAmelCase ( self : List[str] ): """simple docstring""" pass @unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __UpperCAmelCase ( self : str ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''model weights aren\'t tied in TimmBackbone.''' ) def __UpperCAmelCase ( self : Tuple ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __UpperCAmelCase ( self : Optional[Any] ): """simple docstring""" pass @unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' ) def __UpperCAmelCase ( self : int ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' ) def __UpperCAmelCase ( self : str ): """simple docstring""" pass @unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' ) def __UpperCAmelCase ( self : str ): """simple docstring""" pass @unittest.skip('''Safetensors is not supported by timm.''' ) def __UpperCAmelCase ( self : List[str] ): """simple docstring""" pass @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __UpperCAmelCase ( self : List[Any] ): """simple 
docstring""" pass def __UpperCAmelCase ( self : int ): """simple docstring""" _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(__lowerCamelCase ) _snake_case = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __lowerCamelCase ) def __UpperCAmelCase ( self : Dict ): """simple docstring""" _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = self.has_attentions # no need to test all models as different heads yield the same functionality _snake_case = self.all_model_classes[0] _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) _snake_case = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) _snake_case = model(**__lowerCamelCase ) _snake_case = outputs[0][-1] # Encoder-/Decoder-only models _snake_case = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: _snake_case = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=__lowerCamelCase ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __UpperCAmelCase ( self : Union[str, Any] ): """simple docstring""" _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(**__lowerCamelCase ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None _snake_case = copy.deepcopy(__lowerCamelCase ) _snake_case = None _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(**__lowerCamelCase ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights _snake_case = copy.deepcopy(__lowerCamelCase ) _snake_case = False _snake_case = model_class(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() _snake_case = model(**__lowerCamelCase )
103
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase = { """configuration_nllb_moe""": [ """NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NllbMoeConfig""", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ """NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""", """NllbMoeForConditionalGeneration""", """NllbMoeModel""", """NllbMoePreTrainedModel""", """NllbMoeTop2Router""", """NllbMoeSparseMLP""", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
104
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
0
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    # Default the right boundary to the last index on the first call
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
105
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple

from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin

if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"


class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
106
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed 
__UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
0
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY

if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
107
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
0
import os
import sys

SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

dependencies = [
    "torch",
    "numpy",
    "tokenizers",
    "filelock",
    "requests",
    "tqdm",
    "regex",
    "sentencepiece",
    "sacremoses",
    "importlib_metadata",
    "huggingface_hub",
]


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
108
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
def naive_cut_rod_recursive(n: int, prices: list):
    _enforce_args(n, prices)
    if n == 0:
        return 0
    max_revue = float("-inf")
    for i in range(1, n + 1):
        max_revue = max(max_revue, prices[i - 1] + naive_cut_rod_recursive(n - i, prices))
    return max_revue


def top_down_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    max_rev = [float("-inf") for _ in range(n + 1)]
    return _top_down_cut_rod_recursive(n, prices, max_rev)


def _top_down_cut_rod_recursive(n: int, prices: list, max_rev: list):
    if max_rev[n] >= 0:
        return max_rev[n]
    elif n == 0:
        return 0
    else:
        max_revenue = float("-inf")
        for i in range(1, n + 1):
            max_revenue = max(
                max_revenue,
                prices[i - 1] + _top_down_cut_rod_recursive(n - i, prices, max_rev),
            )
        max_rev[n] = max_revenue
        return max_rev[n]


def bottom_up_cut_rod(n: int, prices: list):
    _enforce_args(n, prices)
    # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
    # length 0.
    max_rev = [float("-inf") for _ in range(n + 1)]
    max_rev[0] = 0

    for i in range(1, n + 1):
        max_revenue_i = max_rev[i]
        for j in range(1, i + 1):
            max_revenue_i = max(max_revenue_i, prices[j - 1] + max_rev[i - j])
        max_rev[i] = max_revenue_i

    return max_rev[n]


def _enforce_args(n: int, prices: list):
    if n < 0:
        msg = f"n must be greater than or equal to 0. Got n = {n}"
        raise ValueError(msg)

    if n > len(prices):
        msg = (
            "Each integral piece of rod must have a corresponding price. "
            f"Got n = {n} but length of prices = {len(prices)}"
        )
        raise ValueError(msg)


def main():
    prices = [6, 10, 12, 15, 20, 23]
    n = len(prices)

    # the best revenue comes from cutting the rod into 6 pieces, each
    # of length 1 resulting in a revenue of 6 * 6 = 36.
    expected_max_revenue = 36

    max_rev_top_down = top_down_cut_rod(n, prices)
    max_rev_bottom_up = bottom_up_cut_rod(n, prices)
    max_rev_naive = naive_cut_rod_recursive(n, prices)

    assert expected_max_revenue == max_rev_top_down
    assert max_rev_top_down == max_rev_bottom_up
    assert max_rev_bottom_up == max_rev_naive


if __name__ == "__main__":
    main()
109
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = 
self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
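# --- Illustrative usage (not part of the test module above) -----------------
# A compact sketch of what these tests assert: ViTImageProcessor resizes any
# input to the configured (height, width) and returns a batched pixel tensor.
# The random image below is an assumption for the sketch.
from PIL import Image
import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray((np.random.rand(40, 30, 3) * 255).astype("uint8"))
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])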
77
0
"""simple docstring""" def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ,_snake_case ): # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def lowerCamelCase ( _snake_case ,_snake_case ,_snake_case ): # Base Case if curr_ind == len(_snake_case ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 ,len(_snake_case ) ): if valid_connection(_snake_case ,_snake_case ,_snake_case ,_snake_case ): # Insert current vertex into path as next transition UpperCAmelCase__ : str = next_ver # Validate created path if util_hamilton_cycle(_snake_case ,_snake_case ,curr_ind + 1 ): return True # Backtrack UpperCAmelCase__ : int = -1 return False def lowerCamelCase ( _snake_case ,_snake_case = 0 ): UpperCAmelCase__ : str = [-1] * (len(_snake_case ) + 1) # initialize start and end of path with starting index UpperCAmelCase__ : Optional[Any] = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(_snake_case ,_snake_case ,1 ) else []
110
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "shi-labs/dinat-mini-in1k-224": "https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}


class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
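# --- Illustrative usage (not part of the config module above) ---------------
# A hedged sketch, assuming an installed `transformers` that ships DinatConfig:
# instantiate with defaults and inspect the attributes derived in __init__.
from transformers import DinatConfig

config = DinatConfig()
print(config.num_layers)   # 4, i.e. len(depths)
print(config.hidden_size)  # 512, i.e. embed_dim * 2 ** (num_levels - 1) = 64 * 8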
79
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = 
torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
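# --- Illustrative usage (mirrors the slow test above; requires a GPU and a
# download of the `openai/shap-e` weights, so this is orientation only) ------
import torch
from diffusers import ShapEPipeline

pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
generator = torch.Generator(device="cuda").manual_seed(0)
images = pipe(
    "a shark", generator=generator, guidance_scale=15.0,
    num_inference_steps=64, frame_size=64, output_type="np",
).images[0]
print(images.shape)  # (20, 64, 64, 3) per the test expectations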
77
0
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
631
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + 
chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
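# --- Illustrative usage (class and argument names in the file above are
# machine-mangled; this sketch assumes it is transformers' ClapFeatureExtractor
# and that the package is installed; the random waveform is fake data) -------
import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor()             # defaults: 48 kHz, 10 s max
waveform = np.random.randn(48_000).astype(np.float32)  # 1 s of fake audio
features = feature_extractor(waveform, sampling_rate=48_000, return_tensors="np")
print(features["input_features"].shape, features["is_longer"])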
77
0
import inspect import os import unittest from pathlib import Path import torch import accelerate from accelerate.test_utils import execute_subprocess_async from accelerate.test_utils.testing import run_command class a_ ( unittest.TestCase ): A = inspect.getfile(accelerate.test_utils ) A = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] ) A = ['''accelerate''', '''launch'''] A = Path.home() / '''.cache/huggingface/accelerate''' A = '''default_config.yaml''' A = config_folder / config_file A = config_folder / '''_default_config.yaml''' A = Path('''tests/test_configs''' ) @classmethod def A_( cls ) -> int: """simple docstring""" if cls.config_path.is_file(): cls.config_path.rename(cls.changed_path ) @classmethod def A_( cls ) -> Tuple: """simple docstring""" if cls.changed_path.is_file(): cls.changed_path.rename(cls.config_path ) def A_( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.base_cmd if torch.cuda.is_available() and (torch.cuda.device_count() > 1): cmd += ["--multi_gpu"] execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() ) def A_( self ) -> List[str]: """simple docstring""" for config in sorted(self.test_config_path.glob('**/*.yaml' ) ): with self.subTest(config_file=UpperCamelCase_ ): execute_subprocess_async( self.base_cmd + ['--config_file', str(UpperCamelCase_ ), self.test_file_path] , env=os.environ.copy() ) def A_( self ) -> List[Any]: """simple docstring""" execute_subprocess_async(['accelerate', 'test'] , env=os.environ.copy() ) class a_ ( unittest.TestCase ): A = '''test-tpu''' A = '''us-central1-a''' A = '''ls''' A = ['''accelerate''', '''tpu-config'''] A = '''cd /usr/share''' A = '''tests/test_samples/test_command_file.sh''' A = '''Running gcloud compute tpus tpu-vm ssh''' def A_( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug'] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , UpperCamelCase_ , ) def A_( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command', self.command, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , UpperCamelCase_ , ) def A_( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--debug'] , return_stdout=UpperCamelCase_ ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def A_( self ) -> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--debug'] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , UpperCamelCase_ , ) def A_( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--command', self.command, '--command', 'echo \"Hello World\"', '--debug', ] 
, return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all' , UpperCamelCase_ , ) def A_( self ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--command_file', self.command_file, '--debug'] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def A_( self ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/0_12_0.yaml', '--command_file', self.command_file, '--tpu_zone', self.tpu_zone, '--tpu_name', self.tpu_name, '--debug', ] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def A_( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + ['--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--debug'] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , ) def A_( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = run_command( self.cmd + [ '--config_file', 'tests/test_configs/latest.yaml', '--install_accelerate', '--accelerate_version', '12.0.0', '--debug', ] , return_stdout=UpperCamelCase_ , ) self.assertIn( f'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all' , UpperCamelCase_ , )
205
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
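# --- Illustrative usage (assuming the class above is ByT5Tokenizer) ---------
# The byte-level scheme implemented above maps every UTF-8 byte to the id
# `byte + 3`, since the pad/eos/unk special tokens occupy ids 0-2.
from transformers import ByT5Tokenizer

tok = ByT5Tokenizer()
print(tok("hi").input_ids)  # [107, 108, 1]: 'h' (104) + 3, 'i' (105) + 3, then eos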
77
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
481
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = 
ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): 
__UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor __UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
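# --- Illustrative sketch of the JAX pattern exercised by the JIT test above:
# a jitted function must return the same values as its eager counterpart, only
# compiled. A self-contained analogue (assumed toy function, not model code):
import jax
import jax.numpy as jnp


@jax.jit
def scaled_sum(x):
    return (2.0 * x).sum()


x = jnp.arange(4.0)
assert scaled_sum(x) == (2.0 * x).sum()  # 12.0 with and without compilation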
77
0
import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right SCREAMING_SNAKE_CASE : Dict = 250004 SCREAMING_SNAKE_CASE : Dict = 250020 @require_sentencepiece @require_tokenizers class UpperCamelCase ( __a , unittest.TestCase ): a__ :Union[str, Any] = MBartTokenizer a__ :List[str] = MBartTokenizerFast a__ :int = True a__ :Dict = True def A_ (self ) -> int: super().setUp() # We have a SentencePiece fixture for testing UpperCamelCase_ : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) tokenizer.save_pretrained(self.tmpdirname ) def A_ (self ) -> Union[str, Any]: UpperCamelCase_ : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ ) UpperCamelCase_ : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCamelCase_ : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCamelCase_ : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCamelCase_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ ) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def A_ (self ) -> Optional[int]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCamelCase_ : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCamelCase_ : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase_ : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase_ : int = tempfile.mkdtemp() UpperCamelCase_ : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_ ) UpperCamelCase_ : Any 
= tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCamelCase_ : Any = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way UpperCamelCase_ : int = tokenizer_r.from_pretrained(UpperCamelCase_ ) UpperCamelCase_ : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=True UpperCamelCase_ : Optional[int] = tempfile.mkdtemp() UpperCamelCase_ : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) UpperCamelCase_ : int = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ ) # Checks everything loads correctly in the same way UpperCamelCase_ : int = tokenizer_r.from_pretrained(UpperCamelCase_ ) UpperCamelCase_ : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) # Save tokenizer rust, legacy_format=False UpperCamelCase_ : Tuple = tempfile.mkdtemp() UpperCamelCase_ : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ ) UpperCamelCase_ : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_ ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCamelCase_ : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_ ) UpperCamelCase_ : str = tokenizer_p.from_pretrained(UpperCamelCase_ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) ) shutil.rmtree(UpperCamelCase_ ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase ( unittest.TestCase ): a__ :Optional[int] = '''facebook/mbart-large-en-ro''' a__ :List[Any] = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] a__ :Optional[Any] = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] a__ :Optional[Any] = [8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2, EN_CODE] @classmethod def A_ (cls ) -> Any: UpperCamelCase_ : 
MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCamelCase_ : Union[str, Any] = 1 return cls def A_ (self ) -> Any: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 ) def A_ (self ) -> List[Any]: UpperCamelCase_ : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ ) def A_ (self ) -> int: self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids ) UpperCamelCase_ : Union[str, Any] = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2] UpperCamelCase_ : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) UpperCamelCase_ : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ ) def A_ (self ) -> Dict: UpperCamelCase_ : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_ ) UpperCamelCase_ : Tuple = 10 UpperCamelCase_ : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , UpperCamelCase_ ) self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) def A_ (self ) -> Optional[Any]: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_026, 250_001] ) def A_ (self ) -> List[str]: UpperCamelCase_ : List[str] = tempfile.mkdtemp() UpperCamelCase_ : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_ ) UpperCamelCase_ : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ ) @require_torch def A_ (self ) -> List[str]: UpperCamelCase_ : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="""pt""" ) UpperCamelCase_ : Dict = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def A_ (self ) -> Any: UpperCamelCase_ : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCamelCase_ : Tuple = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCamelCase_ : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def A_ (self ) 
-> List[Any]: UpperCamelCase_ : List[str] = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="""pt""" ) UpperCamelCase_ : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="""pt""" ) UpperCamelCase_ : int = targets["input_ids"] UpperCamelCase_ : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def A_ (self ) -> Optional[Any]: UpperCamelCase_ : int = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3_034, 2, 250_004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250_001, } , )
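# A minimal usage sketch of the translation pattern these tests exercise. It reuses the
# checkpoint and language codes from the test class above; the tail-token check in the
# final comment mirrors the assertions in the tests, everything else is illustrative.
from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(
    [" UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    return_tensors="pt",
)
# Source ids end with [eos(=2), en_XX(=250004)]; labels end with [eos(=2), ro_RO(=250020)].
print(batch["input_ids"][0, -2:], batch["labels"][0, -2:])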
635
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
0
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)
else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
408
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should 
be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
0
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters.

    >>> reverse_long_words('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
57
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , 
UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def 
_UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
77
0
'''simple docstring'''
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Solve a*x**2 + b*x + c = 0; real roots are returned as plain floats."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
267
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list: """simple docstring""" __UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase ) for i in range(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = i __UpperCAmelCase : Any = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __UpperCAmelCase : Dict = array[temp_index - 1] temp_index -= 1 __UpperCAmelCase : str = temp_index_value return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap """simple docstring""" __UpperCAmelCase : Optional[Any] = index __UpperCAmelCase : List[str] = 2 * index + 1 # Left Node __UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __UpperCAmelCase : Tuple = left_index if right_index < heap_size and array[largest] < array[right_index]: __UpperCAmelCase : int = right_index if largest != index: __UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index] heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i] heapify(UpperCamelCase , 0 , UpperCamelCase ) return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = low __UpperCAmelCase : List[str] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i] i += 1 def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = 16 return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(UpperCamelCase ) max_depth -= 1 __UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = p return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by a comma : """).strip() A = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
77
0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] =logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Union[str, Any] ={ 'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json', 'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json', 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json', 'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json', 'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json', 'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json', 'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json', 'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json', 'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json', 'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json', 'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json', 'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json', } class _UpperCAmelCase ( a_ ): """simple docstring""" __snake_case = """codegen""" __snake_case = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , _lowercase=50400 , _lowercase=2048 , _lowercase=2048 , _lowercase=4096 , _lowercase=28 , _lowercase=16 , _lowercase=64 , _lowercase=None , _lowercase="gelu_new" , _lowercase=0.0 , _lowercase=0.0 , _lowercase=0.0 , _lowercase=1E-5 , _lowercase=0.02 , _lowercase=True , _lowercase=50256 , _lowercase=50256 , _lowercase=False , **_lowercase , ) -> str: _lowerCamelCase : Tuple = vocab_size _lowerCamelCase : Union[str, Any] = n_ctx _lowerCamelCase : List[str] = n_positions _lowerCamelCase : Dict = n_embd _lowerCamelCase : List[Any] = n_layer _lowerCamelCase : List[Any] = n_head _lowerCamelCase : int = n_inner _lowerCamelCase : Optional[int] = rotary_dim _lowerCamelCase : Union[str, Any] = activation_function _lowerCamelCase : Optional[Any] = resid_pdrop _lowerCamelCase : Tuple = embd_pdrop _lowerCamelCase : List[Any] = attn_pdrop _lowerCamelCase : Union[str, Any] = layer_norm_epsilon _lowerCamelCase : Tuple = initializer_range _lowerCamelCase : Optional[Any] = use_cache _lowerCamelCase : Union[str, Any] = bos_token_id _lowerCamelCase : Optional[Any] = eos_token_id super().__init__( bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , tie_word_embeddings=UpperCamelCase_ , **UpperCamelCase_ ) class _UpperCAmelCase ( a_ ): """simple docstring""" def __init__( self , _lowercase , _lowercase = "default" , _lowercase = None , _lowercase = False , ) -> Tuple: super().__init__(UpperCamelCase_ , task=UpperCamelCase_ , patching_specs=UpperCamelCase_ , use_past=UpperCamelCase_ ) if not getattr(self._config , 
'''pad_token_id''' , UpperCamelCase_ ): # TODO: how to do that better? _lowerCamelCase : Optional[Any] = 0 @property def a__ ( self ) -> Optional[Any]: _lowerCamelCase : Dict = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase_ , direction='''inputs''' ) _lowerCamelCase : Dict = {0: "batch", 1: "past_sequence + sequence"} else: _lowerCamelCase : int = {0: "batch", 1: "sequence"} return common_inputs @property def a__ ( self ) -> List[Any]: return self._config.n_layer @property def a__ ( self ) -> Optional[int]: return self._config.n_head def a__ ( self , _lowercase , _lowercase = -1 , _lowercase = -1 , _lowercase = False , _lowercase = None , ) -> str: _lowerCamelCase : str = super(UpperCamelCase_ , self ).generate_dummy_inputs( UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ ) # We need to order the input in the way they appears in the forward() _lowerCamelCase : str = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _lowerCamelCase : Dict = common_inputs["input_ids"].shape # Not using the same length for past_key_values _lowerCamelCase : List[Any] = seqlen + 2 _lowerCamelCase : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCamelCase : Any = [ (torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(self.num_layers ) ] _lowerCamelCase : Dict = common_inputs["attention_mask"] if self.use_past: _lowerCamelCase : Any = ordered_inputs["attention_mask"].dtype _lowerCamelCase : Any = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 ) return ordered_inputs @property def a__ ( self ) -> Dict: return 13
434
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
0
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort _A = logging.get_logger(__name__) _A = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class lowerCamelCase : '''simple docstring''' def __init__(self , _lowerCamelCase=None , **_lowerCamelCase ): """simple docstring""" logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" ) UpperCAmelCase__ : List[Any] = model UpperCAmelCase__ : Optional[Any] = kwargs.get("""model_save_dir""" , UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = kwargs.get("""latest_model_name""" , UpperCamelCase_ ) def __call__(self , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[str] = {k: np.array(UpperCamelCase_ ) for k, v in kwargs.items()} return self.model.run(UpperCamelCase_ , UpperCamelCase_ ) @staticmethod def _a (_lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ): """simple docstring""" if provider is None: logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" ) UpperCAmelCase__ : Any = "CPUExecutionProvider" return ort.InferenceSession(UpperCamelCase_ , providers=[provider] , sess_options=UpperCamelCase_ ) def _a (self , _lowerCamelCase , _lowerCamelCase = None , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME UpperCAmelCase__ : Dict = self.model_save_dir.joinpath(self.latest_model_name ) UpperCAmelCase__ : List[str] = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ ) try: shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCAmelCase__ : Tuple = self.model_save_dir.joinpath(UpperCamelCase_ ) if src_path.exists(): UpperCAmelCase__ : int = Path(UpperCamelCase_ ).joinpath(UpperCamelCase_ ) try: shutil.copyfile(UpperCamelCase_ , UpperCamelCase_ ) except shutil.SameFileError: pass def _a (self , _lowerCamelCase , **_lowerCamelCase , ): """simple docstring""" if os.path.isfile(UpperCamelCase_ ): logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(UpperCamelCase_ , exist_ok=UpperCamelCase_ ) # saving model weights/files self._save_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def _a (cls , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): """simple docstring""" UpperCAmelCase__ : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(UpperCamelCase_ ): UpperCAmelCase__ : int = OnnxRuntimeModel.load_model( os.path.join(UpperCamelCase_ , UpperCamelCase_ ) , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ ) UpperCAmelCase__ : Dict = Path(UpperCamelCase_ ) # load model from hub else: # download model 
UpperCAmelCase__ : Any = hf_hub_download( repo_id=UpperCamelCase_ , filename=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , ) UpperCAmelCase__ : Optional[int] = Path(UpperCamelCase_ ).parent UpperCAmelCase__ : List[str] = Path(UpperCamelCase_ ).name UpperCAmelCase__ : Union[str, Any] = OnnxRuntimeModel.load_model(UpperCamelCase_ , provider=UpperCamelCase_ , sess_options=UpperCamelCase_ ) return cls(model=UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def _a (cls , _lowerCamelCase , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = None , **_lowerCamelCase , ): """simple docstring""" UpperCAmelCase__ : Optional[int] = None if len(str(UpperCamelCase_ ).split("""@""" ) ) == 2: UpperCAmelCase__ : Any = model_id.split("""@""" ) return cls._from_pretrained( model_id=UpperCamelCase_ , revision=UpperCamelCase_ , cache_dir=UpperCamelCase_ , force_download=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , **UpperCamelCase_ , )
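# A hedged usage sketch for the wrapper above. The repo id, file name, and input
# name are hypothetical placeholders -- substitute a real ONNX model and its actual
# graph input names; the keyword argument mirrors the `file_name` parameter as
# reconstructed from `_from_pretrained`.
import numpy as np

model = OnnxRuntimeModel.from_pretrained("my-org/my-onnx-model", file_name="model.onnx")
outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))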
182
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
0
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            any(label.name.lower() == "stale" for label in issue.get_labels())
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
491
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __UpperCAmelCase : Optional[Any] = TapasConfig.from_json_file(UpperCamelCase ) # set absolute/relative position embeddings parameter __UpperCAmelCase : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WTQ": # run_task_main.py hparams __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : Any = True # hparam_utils.py hparams __UpperCAmelCase : Union[str, Any] = 0.664694 __UpperCAmelCase : Union[str, Any] = 0.207951 __UpperCAmelCase : int = 0.121194 __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[str] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[str] = 0.0352513 __UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __UpperCAmelCase : int = 4 __UpperCAmelCase : Optional[int] = False # hparam_utils.py hparams __UpperCAmelCase : int = 36.4519 __UpperCAmelCase : str = 0.903421 __UpperCAmelCase : Dict = 222.088 __UpperCAmelCase : Dict = True __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Any = 0.763141 __UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "TABFACT": __UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase ) elif task == "MLM": __UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": __UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase ) else: raise ValueError(f"Task {task} not supported." ) print(f"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(UpperCamelCase ) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}" ) __UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(UpperCamelCase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. 
Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
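# A sketch of driving the converter above from Python rather than the CLI. The
# function name and positional argument order come from the __main__ block; the
# task value is one of those handled above, and the paths are hypothetical
# placeholders.
convert_tf_checkpoint_to_pytorch(
    "WTQ",                         # task
    True,                          # reset_position_index_per_cell
    "/path/to/model.ckpt",         # tf_checkpoint_path
    "/path/to/tapas_config.json",  # tapas_config_file
    "/path/to/pytorch_dump",       # pytorch_dump_path
)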
77
0
import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ): __lowerCamelCase = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' def __UpperCAmelCase ( self , _lowerCAmelCase=0 ): UpperCAmelCase__ : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCamelCase_ ) ) UpperCAmelCase__ : Any = np.random.RandomState(UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.7_5, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def __UpperCAmelCase ( self ): UpperCAmelCase__ : Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs() UpperCAmelCase__ : List[Any] = pipe(**UpperCamelCase_ ).images UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) UpperCAmelCase__ : str = np.array([0.6_9_6_4_3, 0.5_8_4_8_4, 0.5_0_3_1_4, 0.5_8_7_6_0, 0.5_5_3_6_8, 0.5_9_6_4_3, 0.5_1_5_2_9, 0.4_1_2_1_7, 0.4_9_0_8_7] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self ): UpperCAmelCase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) UpperCAmelCase__ : List[Any] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : List[str] = self.get_dummy_inputs() UpperCAmelCase__ : str = pipe(**UpperCamelCase_ ).images UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase__ : Any = np.array([0.6_1_7_3_7, 0.5_4_6_4_2, 0.5_3_1_8_3, 0.5_4_4_6_5, 0.5_2_7_4_2, 0.6_0_5_2_5, 0.4_9_9_6_9, 0.4_0_6_5_5, 0.4_8_1_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self ): UpperCAmelCase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) UpperCAmelCase__ : List[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) # warmup pass to apply optimizations UpperCAmelCase__ : int = pipe(**self.get_dummy_inputs() ) UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs() UpperCAmelCase__ : str = pipe(**UpperCamelCase_ ).images UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase__ : Tuple = np.array([0.5_2_7_6_1, 0.5_9_9_7_7, 0.4_9_0_3_3, 0.4_9_6_1_9, 0.5_4_2_8_2, 0.5_0_3_1_1, 0.4_7_6_0_0, 0.4_0_9_1_8, 0.4_5_2_0_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self ): UpperCAmelCase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , 
provider="""CPUExecutionProvider""" ) UpperCAmelCase__ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : List[Any] = self.get_dummy_inputs() UpperCAmelCase__ : Dict = pipe(**UpperCamelCase_ ).images UpperCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase__ : int = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) UpperCAmelCase__ : str = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs() UpperCAmelCase__ : List[Any] = pipe(**UpperCamelCase_ ).images UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase__ : List[Any] = np.array([0.5_2_9_1_1, 0.6_0_0_0_4, 0.4_9_2_2_9, 0.4_9_8_0_5, 0.5_4_5_0_2, 0.5_0_6_8_0, 0.4_7_7_7_7, 0.4_1_0_2_8, 0.4_5_3_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def __UpperCAmelCase ( self ): UpperCAmelCase__ : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) UpperCAmelCase__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : List[str] = self.get_dummy_inputs() UpperCAmelCase__ : Any = pipe(**UpperCamelCase_ ).images UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) UpperCAmelCase__ : List[Any] = np.array([0.6_5_3_3_1, 0.5_8_2_7_7, 0.4_8_2_0_4, 0.5_6_0_5_9, 0.5_3_6_6_5, 0.5_6_2_3_5, 0.5_0_9_6_9, 0.4_0_0_0_9, 0.4_6_5_5_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): @property def __UpperCAmelCase ( self ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def __UpperCAmelCase ( self ): UpperCAmelCase__ : Tuple = ort.SessionOptions() UpperCAmelCase__ : int = False return options def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[int] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) UpperCAmelCase__ : Union[str, Any] = init_image.resize((768, 512) ) # using the PNDM scheduler by default UpperCAmelCase__ : Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : Any = "A fantasy landscape, trending on artstation" UpperCAmelCase__ : Optional[int] = np.random.RandomState(0 ) UpperCAmelCase__ : Dict = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase_ , output_type="""np""" , ) UpperCAmelCase__ : Optional[int] = 
output.images UpperCAmelCase__ : Optional[Any] = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) UpperCAmelCase__ : Optional[int] = np.array([0.4_9_0_9, 0.5_0_5_9, 0.5_3_7_2, 0.4_6_2_3, 0.4_8_7_6, 0.5_0_4_9, 0.4_8_2_0, 0.4_9_5_6, 0.5_0_1_9] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def __UpperCAmelCase ( self ): UpperCAmelCase__ : Any = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) UpperCAmelCase__ : Any = init_image.resize((768, 512) ) UpperCAmelCase__ : List[Any] = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) UpperCAmelCase__ : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = "A fantasy landscape, trending on artstation" UpperCAmelCase__ : Any = np.random.RandomState(0 ) UpperCAmelCase__ : Optional[Any] = pipe( prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase_ , output_type="""np""" , ) UpperCAmelCase__ : List[str] = output.images UpperCAmelCase__ : Tuple = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) UpperCAmelCase__ : Tuple = np.array([0.8_0_4_3, 0.9_2_6, 0.9_5_8_1, 0.8_1_1_9, 0.8_9_5_4, 0.9_1_3, 0.7_2_0_9, 0.7_4_6_3, 0.7_4_3_1] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
79
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None: """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __UpperCAmelCase : Optional[Any] = v.half() if save_path is None: # overwrite src_path __UpperCAmelCase : str = src_path torch.save(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
77
0
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection``; return its index or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        elif point < left:
            right = left
            left = point
        elif point > right:
            left = right
            right = point
        else:
            if item < current_item:
                right = point - 1
            else:
                left = point + 1
    return None


def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Recursive variant; ``left`` and ``right`` must bracket the search range."""
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)


def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True


if __name__ == "__main__":
    import sys

    debug = 0
    if debug == 1:
        collection = [10, 30, 40, 45, 50, 66, 77, 93]
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
        target = 67
        result = interpolation_search(collection, target)
        if result is not None:
            print(f"{target} found at positions: {result}")
        else:
            print("Not found")
631
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
0
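Because the identifier mangling above obscures the control flow, here is a readable re-statement of the iterative interpolation search, checked against the demo inputs from the snippet's __main__ block. This is a sketch with cleaned-up names, not the row's literal code:

# Interpolation search: probe position is estimated from the value distribution
# instead of always taking the midpoint as in binary search.
def interpolation_search(sorted_collection, item):
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        # Avoid division by zero when the window holds equal values.
        if sorted_collection[left] == sorted_collection[right]:
            return left if sorted_collection[left] == item else None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        if point < left:        # probe fell left of the window
            right, left = left, point
        elif point > right:     # probe fell right of the window
            left, right = right, point
        elif item < current_item:
            right = point - 1
        else:
            left = point + 1
    return None

assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 45) == 3
assert interpolation_search([10, 30, 40, 45, 50, 66, 77, 93], 67) is None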
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = FlaxAutoModelForSeqaSeqLM.from_config(config=SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"] if config.model_type == "t5": SCREAMING_SNAKE_CASE_ = "SelfAttention" if config.model_type == "longt5" and config.encoder_attention_type == "local": SCREAMING_SNAKE_CASE_ = "LocalSelfAttention" elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": SCREAMING_SNAKE_CASE_ = "TransientGlobalSelfAttention" else: raise ValueError( 'Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`' ' attribute with a value from [\'local\', \'transient-global].' ) # Encoder for layer_index in range(config.num_layers ): SCREAMING_SNAKE_CASE_ = F'layers_{str(SCREAMING_SNAKE_CASE )}' # Self-Attention SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"] # Layer Normalization SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"] if split_mlp_wi: SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning SCREAMING_SNAKE_CASE_ = flax_model.params["encoder"]["block"][str(SCREAMING_SNAKE_CASE )]["layer"] SCREAMING_SNAKE_CASE_ = tax_attention_key SCREAMING_SNAKE_CASE_ = tax_attention_out SCREAMING_SNAKE_CASE_ = tax_attention_query SCREAMING_SNAKE_CASE_ = tax_attention_value SCREAMING_SNAKE_CASE_ = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": SCREAMING_SNAKE_CASE_ = tax_global_layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ = tax_mlp_wi_a SCREAMING_SNAKE_CASE_ = tax_mlp_wi_a else: SCREAMING_SNAKE_CASE_ = tax_mlp_wi SCREAMING_SNAKE_CASE_ = tax_mlp_wo SCREAMING_SNAKE_CASE_ = tax_mlp_layer_norm SCREAMING_SNAKE_CASE_ = flax_model_encoder_layer_block # Only for layer 0: SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T SCREAMING_SNAKE_CASE_ = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": 
SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T SCREAMING_SNAKE_CASE_ = tax_encoder_global_rel_embedding # Assigning SCREAMING_SNAKE_CASE_ = tax_model["target"]["encoder"]["encoder_norm"]["scale"] SCREAMING_SNAKE_CASE_ = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): SCREAMING_SNAKE_CASE_ = F'layers_{str(SCREAMING_SNAKE_CASE )}' # Self-Attention SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"] # Layer Normalization SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][ "scale" ] # Encoder-Decoder-Attention SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"] SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_module["key"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_module["out"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_module["query"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_module["value"]["kernel"] # Layer Normalization SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"] # MLP if split_mlp_wi: SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"] else: SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"] SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"] # Layer Normalization SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"] # Assigning SCREAMING_SNAKE_CASE_ = flax_model.params["decoder"]["block"][str(SCREAMING_SNAKE_CASE )]["layer"] SCREAMING_SNAKE_CASE_ = tax_attention_key SCREAMING_SNAKE_CASE_ = tax_attention_out SCREAMING_SNAKE_CASE_ = tax_attention_query SCREAMING_SNAKE_CASE_ = tax_attention_value SCREAMING_SNAKE_CASE_ = tax_pre_attention_layer_norm SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_key SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_out SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_query SCREAMING_SNAKE_CASE_ = tax_enc_dec_attention_value SCREAMING_SNAKE_CASE_ = tax_cross_layer_norm if split_mlp_wi: SCREAMING_SNAKE_CASE_ = tax_mlp_wi_a SCREAMING_SNAKE_CASE_ = tax_mlp_wi_a else: SCREAMING_SNAKE_CASE_ = tax_mlp_wi SCREAMING_SNAKE_CASE_ = tax_mlp_wo SCREAMING_SNAKE_CASE_ = txa_mlp_layer_norm SCREAMING_SNAKE_CASE_ = flax_model_decoder_layer_block # Decoder Normalization SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"]["decoder_norm"]["scale"] SCREAMING_SNAKE_CASE_ = txa_decoder_norm # Only for layer 0: SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T SCREAMING_SNAKE_CASE_ = tax_decoder_rel_embedding # Token Embeddings SCREAMING_SNAKE_CASE_ = tax_model["target"]["token_embedder"]["embedding"] SCREAMING_SNAKE_CASE_ = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: SCREAMING_SNAKE_CASE_ = tax_model["target"]["decoder"]["logits_dense"]["kernel"] flax_model.save_pretrained(SCREAMING_SNAKE_CASE ) print('T5X 
Model was sucessfully converted!' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint." ) parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.") parser.add_argument( "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model." ) SCREAMING_SNAKE_CASE__ : int = parser.parse_args() convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
205
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = 
tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = 
self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
0
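One detail worth isolating from the T5X-to-Flax converter above is the `wi_0` probe that decides between the fused MLP layout and the gated (T5 v1.1-style) `wi_0`/`wi_1` layout. A tiny sketch with a stand-in dictionary whose nesting mirrors the checkpoint structure used above; no real checkpoint is loaded:

# Minimal illustration of the `wi_0` check used to detect a gated-MLP checkpoint.
t5x_like = {"target": {"encoder": {"layers_0": {"mlp": {"wi_0": {}, "wi_1": {}, "wo": {}}}}}}
split_mlp_wi = "wi_0" in t5x_like["target"]["encoder"]["layers_0"]["mlp"]
assert split_mlp_wi  # gated checkpoints store wi_0/wi_1 instead of a single wi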
import unittest

import numpy as np
import torch

from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad


class __A ( unittest.TestCase ):
    '''simple docstring'''

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = 1_0

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = [1, 2, 3, 4]
        lowerCamelCase__ = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
        lowerCamelCase__ = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
        self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
        lowerCamelCase__ = process_story(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , [] )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = ""
        lowerCamelCase__ = process_story(UpperCamelCase_ )
        self.assertEqual(UpperCamelCase_ , [] )
        self.assertEqual(UpperCamelCase_ , [] )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = (
            "It was the year of Our Lord one thousand seven hundred and "
            "seventy-five\n\nSpiritual revelations were conceded to England "
            "at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
        )
        lowerCamelCase__ = process_story(UpperCamelCase_ )
        lowerCamelCase__ = [
            "It was the year of Our Lord one thousand seven hundred and seventy-five.",
            "Spiritual revelations were conceded to England at that favoured period, as at this.",
        ]
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )

        lowerCamelCase__ = ["It was the best of times."]
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = torch.tensor([1, 2, 3, 4] )
        lowerCamelCase__ = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
        lowerCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 2_3 ).numpy() , expected.numpy() )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        lowerCamelCase__ = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )

    def __lowerCamelCase ( self ):
        '''simple docstring'''
        lowerCamelCase__ = 1_0_1
        lowerCamelCase__ = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
        lowerCamelCase__ = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        lowerCamelCase__ = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
        np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
481
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
0
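The `truncate_or_pad` contract that the tests above encode is easy to restate directly. A minimal sketch under the assumption (consistent with the expected values in the tests) that short sequences are right-padded and long ones truncated to `block_size`:

# Minimal truncate-or-pad matching the expectations in the tests above.
def truncate_or_pad(sequence, block_size, pad_token_id):
    sequence = sequence[:block_size]                      # drop anything past block_size
    return sequence + [pad_token_id] * (block_size - len(sequence))  # right-pad the rest

assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert truncate_or_pad(list(range(1, 14)), 10, 0) == list(range(1, 11))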
import warnings from typing import List from unittest.mock import Mock import torch from torch.utils.data import DataLoader, IterableDataset, TensorDataset from accelerate.accelerator import Accelerator from accelerate.utils.dataclasses import DistributedType class UpperCamelCase ( __a ): def __init__(self , __UpperCamelCase ) -> int: UpperCamelCase_ : int = data def __iter__(self ) -> List[Any]: for element in self.data: yield element def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Dict=True ): UpperCamelCase_ : str = Accelerator(even_batches=_SCREAMING_SNAKE_CASE ) assert accelerator.num_processes == 2, "this script expects that two GPUs are available" return accelerator def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[str] = False ): if iterable: UpperCamelCase_ : Dict = DummyIterableDataset(torch.as_tensor(range(_SCREAMING_SNAKE_CASE ) ) ) else: UpperCamelCase_ : List[Any] = TensorDataset(torch.as_tensor(range(_SCREAMING_SNAKE_CASE ) ) ) UpperCamelCase_ : List[Any] = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : Optional[Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE ) return dl def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : List[Any] , ): UpperCamelCase_ : List[Any] = create_dataloader(accelerator=_SCREAMING_SNAKE_CASE , dataset_size=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : List[str] = [len(batch[0] ) for batch in dl] if accelerator.process_index == 0: assert batch_sizes == process_0_expected_batch_sizes elif accelerator.process_index == 1: assert batch_sizes == process_1_expected_batch_sizes def lowerCAmelCase_ ( ): UpperCamelCase_ : Optional[Any] = create_accelerator() # without padding, we would expect a different number of batches verify_dataloader_batch_sizes( _SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , ) # without padding, we would expect the same number of batches, but different sizes verify_dataloader_batch_sizes( _SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , ) def lowerCAmelCase_ ( ): UpperCamelCase_ : Any = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE ) verify_dataloader_batch_sizes( _SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , ) verify_dataloader_batch_sizes( _SCREAMING_SNAKE_CASE , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , ) def lowerCAmelCase_ ( ): UpperCamelCase_ : Optional[Any] = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : Dict = torch.nn.Linear(1 , 1 ) UpperCamelCase_ : Tuple = accelerator.prepare(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : Any = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) UpperCamelCase_ : Optional[int] = [] with accelerator.join_uneven_inputs([ddp_model] ): for batch_idx, batch in enumerate(_SCREAMING_SNAKE_CASE ): UpperCamelCase_ : Tuple = ddp_model(batch[0].float() ) UpperCamelCase_ : List[str] = output.sum() loss.backward() batch_idxs.append(_SCREAMING_SNAKE_CASE ) accelerator.wait_for_everyone() if accelerator.process_index == 0: 
assert batch_idxs == [0, 1] elif accelerator.process_index == 1: assert batch_idxs == [0] def lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE : Dict ): with warnings.catch_warnings(record=_SCREAMING_SNAKE_CASE ) as w: with accelerator.join_uneven_inputs([Mock()] ): pass assert issubclass(w[-1].category , _SCREAMING_SNAKE_CASE ) assert "only supported for multi-GPU" in str(w[-1].message ) def lowerCAmelCase_ ( ): UpperCamelCase_ : int = True UpperCamelCase_ : Optional[int] = False UpperCamelCase_ : Any = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : int = torch.nn.Linear(1 , 1 ) UpperCamelCase_ : int = accelerator.prepare(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : Optional[Any] = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) UpperCamelCase_ : Any = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) with accelerator.join_uneven_inputs([ddp_model] , even_batches=_SCREAMING_SNAKE_CASE ): UpperCamelCase_ : Tuple = train_dl.batch_sampler.even_batches UpperCamelCase_ : Optional[int] = valid_dl.batch_sampler.even_batches assert train_dl_overridden_value == overridden_even_batches assert valid_dl_overridden_value == overridden_even_batches assert train_dl.batch_sampler.even_batches == default_even_batches assert valid_dl.batch_sampler.even_batches == default_even_batches def lowerCAmelCase_ ( ): UpperCamelCase_ : Optional[Any] = True UpperCamelCase_ : Any = False UpperCamelCase_ : str = create_accelerator(even_batches=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : Any = torch.nn.Linear(1 , 1 ) UpperCamelCase_ : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE ) create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : Union[str, Any] = create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 ) with warnings.catch_warnings(): warnings.filterwarnings("""ignore""" ) try: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_SCREAMING_SNAKE_CASE ): UpperCamelCase_ : Any = batch_dl.batch_sampler.even_batches except AttributeError: # ensure attribute error is not raised when processing iterable dl raise AssertionError assert batch_dl_overridden_value == overridden_even_batches assert batch_dl.batch_sampler.even_batches == default_even_batches def lowerCAmelCase_ ( ): UpperCamelCase_ : Optional[int] = create_accelerator() UpperCamelCase_ : str = torch.nn.Linear(1 , 1 ) UpperCamelCase_ : Union[str, Any] = accelerator.prepare(_SCREAMING_SNAKE_CASE ) create_dataloader(_SCREAMING_SNAKE_CASE , dataset_size=3 , batch_size=1 , iterable=_SCREAMING_SNAKE_CASE ) with warnings.catch_warnings(record=_SCREAMING_SNAKE_CASE ) as w: with accelerator.join_uneven_inputs([ddp_model] , even_batches=_SCREAMING_SNAKE_CASE ): pass assert issubclass(w[-1].category , _SCREAMING_SNAKE_CASE ) assert "only supported for map-style datasets" in str(w[-1].message ) def lowerCAmelCase_ ( ): UpperCamelCase_ : int = create_accelerator() accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" ) test_default_ensures_even_batch_sizes() accelerator.print("""Run tests with even_batches disabled""" ) test_can_disable_even_batches() accelerator.print("""Test joining uneven inputs""" ) test_can_join_uneven_inputs() accelerator.print("""Test overriding even_batches when joining uneven inputs""" ) test_join_can_override_even_batches() accelerator.print("""Test overriding even_batches for mixed dataloader types""" ) 
test_join_can_override_for_mixed_type_dataloaders() accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" ) test_join_raises_warning_for_iterable_when_overriding_even_batches() accelerator.print("""Test join with non DDP distributed raises warning""" ) UpperCamelCase_ : Optional[Any] = accelerator.state.distributed_type UpperCamelCase_ : str = DistributedType.FSDP test_join_raises_warning_for_non_ddp_distributed(_SCREAMING_SNAKE_CASE ) UpperCamelCase_ : Union[str, Any] = original_state if __name__ == "__main__": main()
635
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
0
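The prompt helper above treats any whitespace-free string as a Hub repo ID and anything else as a literal prompt. A minimal sketch of just that branching heuristic, using only the regex check so nothing is fetched over the network:

# Sketch of the repo-ID-vs-literal-prompt heuristic used by the helper above:
# a string containing any whitespace is returned verbatim; otherwise it is
# treated as a dataset repo ID to fetch the template from.
import re

def looks_like_repo_id(prompt_or_repo_id: str) -> bool:
    return re.search(r"\s", prompt_or_repo_id) is None

assert looks_like_repo_id("huggingface-tools/default-prompts")
assert not looks_like_repo_id("Answer the question: <<task>>")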
import re

from ..utils import cached_file


# docstyle-ignore
snake_case__ : Union[str, Any] = '\nHuman: <<task>>\n\nAssistant: '

snake_case__ : str = 'huggingface-tools/default-prompts'
snake_case__ : Optional[int] = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}


def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="run" ) ->List[str]:
    if prompt_or_repo_id is None:
        _UpperCAmelCase =DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s" , _lowerCamelCase ) is not None:
        return prompt_or_repo_id

    _UpperCAmelCase =cached_file(
        _lowerCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
    with open(_lowerCamelCase , "r" , encoding="utf-8" ) as f:
        return f.read()
408
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" def __init__( self , _lowerCamelCase , _lowerCamelCase=1_3 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=9_9 , _lowerCamelCase=3_2 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=3_7 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=5_1_2 , _lowerCamelCase=1_6 , _lowerCamelCase=2 , _lowerCamelCase=0.0_2 , _lowerCamelCase=4 , ): UpperCamelCase_: str = parent UpperCamelCase_: Dict = batch_size UpperCamelCase_: Dict = seq_length UpperCamelCase_: List[str] = is_training UpperCamelCase_: List[Any] = use_attention_mask UpperCamelCase_: Union[str, Any] = use_token_type_ids UpperCamelCase_: Dict = use_labels UpperCamelCase_: Optional[Any] = vocab_size UpperCamelCase_: List[Any] = hidden_size UpperCamelCase_: Optional[int] = num_hidden_layers UpperCamelCase_: List[Any] = num_attention_heads UpperCamelCase_: int = intermediate_size UpperCamelCase_: Union[str, Any] = hidden_act UpperCamelCase_: Dict = hidden_dropout_prob UpperCamelCase_: Optional[Any] = attention_probs_dropout_prob UpperCamelCase_: Any = max_position_embeddings UpperCamelCase_: Any = type_vocab_size UpperCamelCase_: Optional[int] = type_sequence_label_size UpperCamelCase_: Any = initializer_range UpperCamelCase_: str = num_choices def _a ( self ): UpperCamelCase_: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase_: Union[str, Any] = None if self.use_attention_mask: UpperCamelCase_: int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase_: Dict = None if self.use_token_type_ids: UpperCamelCase_: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase_: Union[str, Any] = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _a ( self ): UpperCamelCase_: Union[str, Any] = self.prepare_config_and_inputs() UpperCamelCase_: List[Any] = config_and_inputs UpperCamelCase_: int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def _a ( self ): UpperCamelCase_: int = self.prepare_config_and_inputs() UpperCamelCase_: Tuple = config_and_inputs UpperCamelCase_: str = True UpperCamelCase_: int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase_: List[str] = ids_tensor([self.batch_size, 
self.seq_length] , vocab_size=2 ) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class _lowerCAmelCase( UpperCAmelCase_ , unittest.TestCase ): """simple docstring""" a : Optional[Any] =True a : Optional[int] =( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def _a ( self ): UpperCamelCase_: List[Any] = FlaxBertModelTester(self ) @slow def _a ( self ): UpperCamelCase_: Any = FlaxBertModel.from_pretrained('bert-base-cased' ) UpperCamelCase_: Union[str, Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase_ )
57
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed 
__UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
0
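A compact round-trip sketch of the offloading utilities exercised in the test row above. It assumes `accelerate` is installed and that `offload_state_dict(folder, state_dict)` plus the mapping-style `OffloadedWeightsLoader` behave as the test uses them; treat the exact signatures as assumptions, not documentation:

# Offload a tiny state dict to disk, then read one weight back via the loader.
from tempfile import TemporaryDirectory

import torch
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"linear.weight": torch.randn(4, 3), "linear.bias": torch.randn(4)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)            # writes index.json + per-weight .dat files
    weight_map = OffloadedWeightsLoader(save_folder=tmp_dir)
    assert sorted(weight_map) == sorted(state_dict)
    assert torch.allclose(weight_map["linear.weight"], state_dict["linear.weight"])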
'''simple docstring''' import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) SCREAMING_SNAKE_CASE__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class snake_case : lowerCAmelCase__ :Union[str, Any] = field( default=UpperCamelCase , metadata={"help": "Model type selected in the list: " + ", ".join(UpperCamelCase )} ) lowerCAmelCase__ :Tuple = field( default=UpperCamelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) lowerCAmelCase__ :Optional[int] = field( default=128 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) lowerCAmelCase__ :List[str] = field( default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) lowerCAmelCase__ :Union[str, Any] = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) lowerCAmelCase__ :int = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." 
) } , ) lowerCAmelCase__ :int = field( default=UpperCamelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) lowerCAmelCase__ :Dict = field( default=UpperCamelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) lowerCAmelCase__ :Optional[int] = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lowerCAmelCase__ :Tuple = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lowerCAmelCase__ :int = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) lowerCAmelCase__ :Optional[Any] = field(default=1 , metadata={"help": "multiple threads for converting example to features"} ) class snake_case (UpperCamelCase ): lowerCAmelCase__ :Union[str, Any] = "train" lowerCAmelCase__ :Any = "dev" class snake_case (UpperCamelCase ): lowerCAmelCase__ :Tuple = 42 lowerCAmelCase__ :int = 42 lowerCAmelCase__ :Optional[int] = 42 lowerCAmelCase__ :Dict = 42 def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ = None ,UpperCAmelCase_ = Split.train ,UpperCAmelCase_ = False ,UpperCAmelCase_ = None ,UpperCAmelCase_ = "pt" ,) -> Dict: lowercase__ = args lowercase__ = is_language_sensitive lowercase__ = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(UpperCamelCase_ ,UpperCamelCase_ ): try: lowercase__ = Split[mode] except KeyError: raise KeyError("mode is not a valid split name" ) lowercase__ = mode # Load data features from cache or dataset file lowercase__ = "v2" if args.version_2_with_negative else "v1" lowercase__ = os.path.join( cache_dir if cache_dir is not None else args.data_dir ,F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' ,) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lowercase__ = cached_features_file + ".lock" with FileLock(UpperCamelCase_ ): if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache: lowercase__ = time.time() lowercase__ = torch.load(UpperCamelCase_ ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. lowercase__ = self.old_features["features"] lowercase__ = self.old_features.get("dataset" ,UpperCamelCase_ ) lowercase__ = self.old_features.get("examples" ,UpperCamelCase_ ) logger.info( F'''Loading features from cached file {cached_features_file} [took %.3f s]''' ,time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( F'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' " future run" ) else: if mode == Split.dev: lowercase__ = self.processor.get_dev_examples(args.data_dir ) else: lowercase__ = self.processor.get_train_examples(args.data_dir ) lowercase__ = squad_convert_examples_to_features( examples=self.examples ,tokenizer=UpperCamelCase_ ,max_seq_length=args.max_seq_length ,doc_stride=args.doc_stride ,max_query_length=args.max_query_length ,is_training=mode == Split.train ,threads=args.threads ,return_dataset=UpperCamelCase_ ,) lowercase__ = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples} ,UpperCamelCase_ ,) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self ) -> str: return len(self.features ) def __getitem__( self ,UpperCAmelCase_ ) -> str: lowercase__ = self.features[i] lowercase__ = torch.tensor(feature.input_ids ,dtype=torch.long ) lowercase__ = torch.tensor(feature.attention_mask ,dtype=torch.long ) lowercase__ = torch.tensor(feature.token_type_ids ,dtype=torch.long ) lowercase__ = torch.tensor(feature.cls_index ,dtype=torch.long ) lowercase__ = torch.tensor(feature.p_mask ,dtype=torch.float ) lowercase__ = torch.tensor(feature.is_impossible ,dtype=torch.float ) lowercase__ = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask} ) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible} ) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape ,dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: lowercase__ = torch.tensor(feature.start_position ,dtype=torch.long ) lowercase__ = torch.tensor(feature.end_position ,dtype=torch.long ) inputs.update({"start_positions": start_positions, "end_positions": end_positions} ) return inputs
267
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
0
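As a quick sanity check on the combinatorics in the row above, a worked example for three nodes, using the standard identities Catalan(n) = C(2n, n)/(n+1) for binary search trees and Catalan(n)·n! for labelled binary trees:

# Worked check for n = 3 nodes:
#   C(6, 3) = 20, so Catalan(3) = 20 // 4 = 5 binary search trees,
#   and 5 * 3! = 30 distinct labelled binary trees.
from math import comb, factorial

n = 3
catalan = comb(2 * n, n) // (n + 1)
assert catalan == 5
assert catalan * factorial(n) == 30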
"""simple docstring""" from __future__ import annotations def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->List[str]: # noqa: E741 while r - l > 1: _lowerCamelCase : Dict = (l + r) // 2 if v[m] >= key: _lowerCamelCase : Optional[int] = m else: _lowerCamelCase : List[str] = m # noqa: E741 return r def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->int: if len(SCREAMING_SNAKE_CASE_ ) == 0: return 0 _lowerCamelCase : Optional[int] = [0] * len(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : str = 1 _lowerCamelCase : List[Any] = v[0] for i in range(1 , len(SCREAMING_SNAKE_CASE_ ) ): if v[i] < tail[0]: _lowerCamelCase : Any = v[i] elif v[i] > tail[length - 1]: _lowerCamelCase : Optional[Any] = v[i] length += 1 else: _lowerCamelCase : int = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
434
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
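For reference, the O(n log n) longest-increasing-subsequence routine in the row above maintains a tails array probed by binary search. A self-contained re-statement with `bisect`, checked on a small input; this is an equivalent sketch, not the row's literal code:

# Tails-array LIS length using bisect, matching the binary-search routine above.
from bisect import bisect_left

def lis_length(v):
    tails = []
    for x in v:
        i = bisect_left(tails, x)      # first tail >= x
        if i == len(tails):
            tails.append(x)            # x extends the longest subsequence
        else:
            tails[i] = x               # x improves an existing tail
    return len(tails)

assert lis_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6  # e.g. 2, 3, 7, 8, 10, 13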
"""simple docstring""" from numpy import exp, pi, sqrt def a__ ( lowerCAmelCase , lowerCAmelCase = 0.0 , lowerCAmelCase = 1.0 ) -> int: return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
182
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = 
self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
0
import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase ( _UpperCAmelCase ): def __A ( self ): A__ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(UpperCamelCase_ , "width_multiplier" ) ) class UpperCamelCase : def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=13 , UpperCAmelCase__=64 , UpperCAmelCase__=2 , UpperCAmelCase__=3 , UpperCAmelCase__="swish" , UpperCAmelCase__=3 , UpperCAmelCase__=32 , UpperCAmelCase__=0.1 , UpperCAmelCase__=0.02 , UpperCAmelCase__=True , UpperCAmelCase__=True , UpperCAmelCase__=10 , UpperCAmelCase__=None , UpperCAmelCase__=0.25 , UpperCAmelCase__=0.0 , UpperCAmelCase__=0.0 , ): A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = make_divisible(512 * width_multiplier , divisor=8 ) A__ = hidden_act A__ = conv_kernel_size A__ = output_stride A__ = classifier_dropout_prob A__ = use_labels A__ = is_training A__ = num_labels A__ = initializer_range A__ = scope A__ = width_multiplier A__ = ffn_dropout A__ = attn_dropout def __A ( self ): A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.num_labels ) A__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) A__ = self.get_config() return config, pixel_values, labels, pixel_labels def __A ( self ): return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = MobileViTVaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() A__ = model(UpperCamelCase_ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = self.num_labels A__ = MobileViTVaForImageClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() A__ = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = self.num_labels A__ = 
MobileViTVaForSemanticSegmentation(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() A__ = model(UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) A__ = model(UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __A ( self ): A__ = self.prepare_config_and_inputs() A__ = config_and_inputs A__ = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): lowerCAmelCase : Optional[Any] = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowerCAmelCase : Tuple = ( { """feature-extraction""": MobileViTVaModel, """image-classification""": MobileViTVaForImageClassification, """image-segmentation""": MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowerCAmelCase : int = False lowerCAmelCase : Any = False lowerCAmelCase : Any = False lowerCAmelCase : Optional[int] = False def __A ( self ): A__ = MobileViTVaModelTester(self ) A__ = MobileViTVaConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def __A ( self ): self.config_tester.run_common_tests() @unittest.skip(reason="MobileViTV2 does not use inputs_embeds" ) def __A ( self ): pass @unittest.skip(reason="MobileViTV2 does not support input and output embeddings" ) def __A ( self ): pass @unittest.skip(reason="MobileViTV2 does not output attentions" ) def __A ( self ): pass @require_torch_multi_gpu @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." ) def __A ( self ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def __A ( self ): pass def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase_ ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __A ( self ): def check_hidden_states_output(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) ) A__ = outputs.hidden_states A__ = 5 self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. 
A__ = 2 for i in range(len(UpperCamelCase_ ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*UpperCamelCase_ ) @slow def __A ( self ): for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = MobileViTVaModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def UpperCamelCase ( )-> Optional[int]: """simple docstring""" A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def __A ( self ): return ( MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ) if is_vision_available() else None ) @slow def __A ( self ): A__ = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to( UpperCamelCase_ ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase_ ) # verify the logits A__ = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , UpperCamelCase_ ) A__ = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4 ) ) @slow def __A ( self ): A__ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) A__ = model.to(UpperCamelCase_ ) A__ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) A__ = prepare_img() A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase_ ) A__ = outputs.logits # verify the logits A__ = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , UpperCamelCase_ ) A__ = torch.tensor( [ [[7.0_863, 7.1_525, 6.8_201], [6.6_931, 6.8_770, 6.8_933], [6.2_978, 7.0_366, 6.9_636]], [[-3.7_134, -3.6_712, -3.6_675], [-3.5_825, -3.3_549, -3.4_777], [-3.3_435, -3.3_979, -3.2_857]], [[-2.9_329, -2.8_003, -2.7_369], [-3.0_564, -2.4_780, -2.0_207], [-2.6_889, -1.9_298, -1.7_640]], ] , device=UpperCamelCase_ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase_ , atol=1e-4 ) ) @slow def __A ( self ): A__ = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) A__ = model.to(UpperCamelCase_ ) A__ = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" ) A__ = prepare_img() A__ = image_processor(images=UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ ) # forward pass with torch.no_grad(): A__ = 
model(**UpperCamelCase_ ) A__ = outputs.logits.detach().cpu() A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ , target_sizes=[(50, 60)] ) A__ = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ ) A__ = image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase_ ) A__ = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , UpperCamelCase_ )
491
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
0
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCAmelCase_ ( __lowerCamelCase , unittest.TestCase ): __lowerCamelCase = KandinskyVaaInpaintPipeline __lowerCamelCase = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image'] __lowerCamelCase = [ 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] __lowerCamelCase = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] __lowerCamelCase = False @property def __UpperCAmelCase ( self ): return 32 @property def __UpperCAmelCase ( self ): return 32 @property def __UpperCAmelCase ( self ): return self.time_input_dim @property def __UpperCAmelCase ( self ): return self.time_input_dim * 4 @property def __UpperCAmelCase ( self ): return 100 @property def __UpperCAmelCase ( self ): torch.manual_seed(0 ) UpperCAmelCase__ : Dict = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCAmelCase__ : str = UNetaDConditionModel(**UpperCamelCase_ ) return model @property def __UpperCAmelCase ( self ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCAmelCase ( self ): torch.manual_seed(0 ) UpperCAmelCase__ : int = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCAmelCase ( self ): UpperCAmelCase__ : int = self.dummy_unet UpperCAmelCase__ : Tuple = self.dummy_movq UpperCAmelCase__ : Any = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="""linear""" , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=UpperCamelCase_ , ) UpperCAmelCase__ : List[str] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ): UpperCAmelCase__ : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCamelCase_ ) # create init_image 
UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase__ : Union[str, Any] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert("""RGB""" ).resize((256, 256) ) # create mask UpperCAmelCase__ : Any = np.ones((64, 64) , dtype=np.floataa ) UpperCAmelCase__ : str = 0 if str(UpperCamelCase_ ).startswith("""mps""" ): UpperCAmelCase__ : int = torch.manual_seed(UpperCamelCase_ ) else: UpperCAmelCase__ : List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) UpperCAmelCase__ : List[str] = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCAmelCase ( self ): UpperCAmelCase__ : Dict = "cpu" UpperCAmelCase__ : Optional[Any] = self.get_dummy_components() UpperCAmelCase__ : int = self.pipeline_class(**UpperCamelCase_ ) UpperCAmelCase__ : List[str] = pipe.to(UpperCamelCase_ ) pipe.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : Any = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) ) UpperCAmelCase__ : Any = output.images UpperCAmelCase__ : int = pipe( **self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0] UpperCAmelCase__ : int = image[0, -3:, -3:, -1] UpperCAmelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1] print(f"image.shape {image.shape}" ) assert image.shape == (1, 64, 64, 3) UpperCAmelCase__ : Union[str, Any] = np.array( [0.5_0_7_7_5_9_0_3, 0.4_9_5_2_7_1_9_5, 0.4_8_8_2_4_5_4_3, 0.5_0_1_9_2_2_3_7, 0.4_8_6_4_4_9_0_6, 0.4_9_3_7_3_8_1_4, 0.4_7_8_0_5_9_8, 0.4_7_2_3_4_8_2_7, 0.4_8_3_2_7_8_4_8] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def __UpperCAmelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase ): def __UpperCAmelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCAmelCase ( self ): UpperCAmelCase__ : List[Any] = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy""" ) UpperCAmelCase__ : int = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) UpperCAmelCase__ : Union[str, Any] = np.ones((768, 768) , dtype=np.floataa ) UpperCAmelCase__ : Union[str, Any] = 0 UpperCAmelCase__ : Optional[int] = "a hat" UpperCAmelCase__ : Any = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = KandinskyVaaInpaintPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder-inpaint""" , torch_dtype=torch.floataa ) UpperCAmelCase__ : str = pipeline.to(UpperCamelCase_ ) pipeline.set_progress_bar_config(disable=UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_prior( 
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() UpperCAmelCase__ : Optional[Any] = pipeline( image=UpperCamelCase_ , mask_image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , output_type="""np""" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
79
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = 
torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
0
def solution() -> int:
    """Project Euler problem 9: return a * b * c for the Pythagorean
    triplet (a, b, c) with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
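For reference, the only triplet meeting the constraints is (200, 375, 425), so the brute-force search above should return their product:

# 200**2 + 375**2 == 425**2 and 200 + 375 + 425 == 1000
assert solution() == 200 * 375 * 425  # 31_875_000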
631
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + 
chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
77
0
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting at ``number`` for ``iterations`` rounds and return the output string."""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
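Expected behaviour for a short run (illustrative checks using the `fizz_buzz` function above; note that every token is followed by a trailing space):

assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "
assert fizz_buzz(1, 15).endswith("FizzBuzz ")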
205
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
77
0
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor


logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
481
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = 
ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): 
__UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor __UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
0
def hamming_distance(string_a: str, string_b: str) -> int:
    """Return the number of positions at which two equal-length strings differ."""
    if len(string_a) != len(string_b):
        raise ValueError("String lengths must match!")
    count = 0
    for char_a, char_b in zip(string_a, string_b):
        if char_a != char_b:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
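Classic checks for the distance (illustrative usage of the `hamming_distance` function above):

assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("0000", "0000") == 0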
635
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
0
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class _a ( unittest.TestCase ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =tempfile.mkdtemp() # fmt: off _UpperCAmelCase =["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on _UpperCAmelCase =dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) _UpperCAmelCase =["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] _UpperCAmelCase ={"unk_token": "<unk>"} _UpperCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) _UpperCAmelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(UpperCamelCase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(UpperCamelCase_ ) ) _UpperCAmelCase ={ "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073], "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711], } _UpperCAmelCase =os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def SCREAMING_SNAKE_CASE ( self , **_snake_case ): return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **UpperCamelCase_ ) def SCREAMING_SNAKE_CASE ( self , **_snake_case ): return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" 
, **UpperCamelCase_ ) def SCREAMING_SNAKE_CASE ( self , **_snake_case ): return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def SCREAMING_SNAKE_CASE ( self ): shutil.rmtree(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _UpperCAmelCase =[Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =self.get_tokenizer() _UpperCAmelCase =self.get_rust_tokenizer() _UpperCAmelCase =self.get_image_processor() _UpperCAmelCase =OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) _UpperCAmelCase =OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ ) _UpperCAmelCase =OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) _UpperCAmelCase =OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ ) def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase =self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) _UpperCAmelCase =self.get_image_processor(do_normalize=UpperCamelCase_ ) _UpperCAmelCase =OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =self.get_image_processor() _UpperCAmelCase =self.get_tokenizer() _UpperCAmelCase =OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _UpperCAmelCase =self.prepare_image_inputs() _UpperCAmelCase =image_processor(UpperCamelCase_ , return_tensors="np" ) _UpperCAmelCase =processor(images=UpperCamelCase_ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =self.get_image_processor() _UpperCAmelCase =self.get_tokenizer() _UpperCAmelCase =OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _UpperCAmelCase ="lower newer" _UpperCAmelCase =processor(text=UpperCamelCase_ , return_tensors="np" ) _UpperCAmelCase =tokenizer(UpperCamelCase_ , return_tensors="np" ) for key in encoded_tok.keys(): 
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =self.get_image_processor() _UpperCAmelCase =self.get_tokenizer() _UpperCAmelCase =OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _UpperCAmelCase ="lower newer" _UpperCAmelCase =self.prepare_image_inputs() _UpperCAmelCase =processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase ="google/owlvit-base-patch32" _UpperCAmelCase =OwlViTProcessor.from_pretrained(UpperCamelCase_ ) _UpperCAmelCase =["cat", "nasa badge"] _UpperCAmelCase =processor(text=UpperCamelCase_ ) _UpperCAmelCase =16 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase ="google/owlvit-base-patch32" _UpperCAmelCase =OwlViTProcessor.from_pretrained(UpperCamelCase_ ) _UpperCAmelCase =[["cat", "nasa badge"], ["person"]] _UpperCAmelCase =processor(text=UpperCamelCase_ ) _UpperCAmelCase =16 _UpperCAmelCase =len(UpperCamelCase_ ) _UpperCAmelCase =max([len(UpperCamelCase_ ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase ="google/owlvit-base-patch32" _UpperCAmelCase =OwlViTProcessor.from_pretrained(UpperCamelCase_ ) _UpperCAmelCase =["cat", "nasa badge"] _UpperCAmelCase =processor(text=UpperCamelCase_ ) _UpperCAmelCase =16 _UpperCAmelCase =inputs["input_ids"] _UpperCAmelCase =[ [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =self.get_image_processor() _UpperCAmelCase =self.get_tokenizer() _UpperCAmelCase =OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _UpperCAmelCase =self.prepare_image_inputs() _UpperCAmelCase =self.prepare_image_inputs() _UpperCAmelCase =processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def SCREAMING_SNAKE_CASE ( self ): _UpperCAmelCase =self.get_image_processor() _UpperCAmelCase =self.get_tokenizer() _UpperCAmelCase =OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) _UpperCAmelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase =processor.batch_decode(UpperCamelCase_ ) _UpperCAmelCase =tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
408
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should 
be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
0
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
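# Sanity-check sketch for the two helpers above (assumes the fixed
# gcd / find_mod_inverse names).
assert gcd(48, 36) == 12
inv = find_mod_inverse(3, 7)  # 3 * 5 = 15, and 15 % 7 == 1
assert inv == 5 and (3 * inv) % 7 == 1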
57
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , 
UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def 
_UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
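# Hedged launch sketch: the test module above is meant to run under a
# distributed launcher. `notebook_launcher` is part of accelerate's public
# API, but the module name `test_metrics` is an assumption about where this
# file is saved.
from accelerate import notebook_launcher
from test_metrics import main

notebook_launcher(main, num_processes=2)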
77
0
import re


def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
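# Usage sketch for the case converters above (assumes the fixed names).
assert to_pascal_case("hello world") == "HelloWorld"
assert to_camel_case("hello world") == "helloWorld"
assert to_snake_case("hello world", upper=False) == "hello_world"
assert to_kebab_case("hello world", upper=True) == "HELLO-WORLD"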
267
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list: """simple docstring""" __UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase ) for i in range(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = i __UpperCAmelCase : Any = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __UpperCAmelCase : Dict = array[temp_index - 1] temp_index -= 1 __UpperCAmelCase : str = temp_index_value return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap """simple docstring""" __UpperCAmelCase : Optional[Any] = index __UpperCAmelCase : List[str] = 2 * index + 1 # Left Node __UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __UpperCAmelCase : Tuple = left_index if right_index < heap_size and array[largest] < array[right_index]: __UpperCAmelCase : int = right_index if largest != index: __UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index] heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i] heapify(UpperCamelCase , 0 , UpperCamelCase ) return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = low __UpperCAmelCase : List[str] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i] i += 1 def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = 16 return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(UpperCamelCase ) max_depth -= 1 __UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = p return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by a comma : """).strip() A = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
77
0
"""simple docstring""" def UpperCamelCase ( SCREAMING_SNAKE_CASE_ = 1000 ) ->int: _lowerCamelCase : List[Any] = 2**power _lowerCamelCase : str = str(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Optional[int] = list(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : List[str] = 0 for i in list_num: sum_of_num += int(SCREAMING_SNAKE_CASE_ ) return sum_of_num if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] =int(input('Enter the power of 2: ').strip()) print('2 ^ ', power, ' = ', 2**power) SCREAMING_SNAKE_CASE__ : Dict =solution(power) print('Sum of the digits is: ', result)
434
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
0
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = None SCREAMING_SNAKE_CASE = BloomTokenizerFast SCREAMING_SNAKE_CASE = BloomTokenizerFast SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = 'tokenizer_file' SCREAMING_SNAKE_CASE = {'bos_token': '<s>', 'eos_token': '</s>', 'unk_token': '<unk>', 'pad_token': '<pad>'} def _a (self ): """simple docstring""" super().setUp() UpperCAmelCase__ : Tuple = BloomTokenizerFast.from_pretrained("""bigscience/tokenizer""" ) tokenizer.save_pretrained(self.tmpdirname ) def _a (self , **_lowerCamelCase ): """simple docstring""" kwargs.update(self.special_tokens_map ) return BloomTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def _a (self ): """simple docstring""" UpperCAmelCase__ : int = self.get_rust_tokenizer() UpperCAmelCase__ : List[str] = ["The quick brown fox</s>", "jumps over the lazy dog</s>"] UpperCAmelCase__ : Union[str, Any] = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]] UpperCAmelCase__ : List[Any] = tokenizer.batch_encode_plus(UpperCamelCase_ )["input_ids"] self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase__ : str = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _a (self , _lowerCamelCase=6 ): """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): UpperCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input UpperCAmelCase__ : Any = "This is a simple input" UpperCAmelCase__ : str = ["This is a simple input 1", "This is a simple input 2"] UpperCAmelCase__ : Union[str, Any] = ("This is a simple input", "This is a pair") UpperCAmelCase__ : Union[str, Any] = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests try: tokenizer_r.encode(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.batch_encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.encode(UpperCamelCase_ , max_length=UpperCamelCase_ ) tokenizer_r.batch_encode_plus(UpperCamelCase_ , max_length=UpperCamelCase_ ) except ValueError: self.fail("""Bloom Tokenizer should be able to deal with padding""" ) UpperCAmelCase__ : Tuple = None # Hotfixing padding = None self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding="""max_length""" ) # Simple input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding="""max_length""" ) # Simple input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding="""max_length""" , ) # Pair input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding="""max_length""" ) # Pair input self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , 
UpperCamelCase_ , max_length=UpperCamelCase_ , padding="""max_length""" ) # Pair input self.assertRaises( UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding="""max_length""" , ) def _a (self ): """simple docstring""" UpperCAmelCase__ : int = self.get_rust_tokenizer() UpperCAmelCase__ : Union[str, Any] = load_dataset("""xnli""" , """all_languages""" , split="""test""" , streaming=UpperCamelCase_ ) UpperCAmelCase__ : Any = next(iter(UpperCamelCase_ ) )["premise"] # pick up one data UpperCAmelCase__ : Optional[int] = list(sample_data.values() ) UpperCAmelCase__ : str = list(map(tokenizer.encode , UpperCamelCase_ ) ) UpperCAmelCase__ : Dict = [tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) for x in output_tokens] self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _a (self ): """simple docstring""" self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 ) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
182
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
0
import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class UpperCamelCase : def __init__( self , UpperCAmelCase__ , UpperCAmelCase__=2 , UpperCAmelCase__=True , UpperCAmelCase__=False , UpperCAmelCase__=10 , UpperCAmelCase__=3 , UpperCAmelCase__=32 * 4 , UpperCAmelCase__=32 * 6 , UpperCAmelCase__=4 , UpperCAmelCase__=32 , ): A__ = parent A__ = batch_size A__ = is_training A__ = use_auxiliary_loss A__ = num_queries A__ = num_channels A__ = min_size A__ = max_size A__ = num_labels A__ = mask_feature_size def __A ( self ): A__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( UpperCamelCase_ ) A__ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCamelCase_ ) A__ = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCamelCase_ ) > 0.5 ).float() A__ = (torch.rand((self.batch_size, self.num_labels) , device=UpperCamelCase_ ) > 0.5).long() A__ = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def __A ( self ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def __A ( self ): A__ = self.prepare_config_and_inputs() A__ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = output.encoder_hidden_states A__ = output.pixel_decoder_hidden_states A__ = output.transformer_decoder_hidden_states self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCamelCase_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(UpperCamelCase_ ) , config.decoder_config.decoder_layers ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__=False ): with torch.no_grad(): A__ = MaskFormerModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() A__ = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ ) A__ = model(UpperCamelCase_ , output_hidden_states=UpperCamelCase_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) 
self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(UpperCamelCase_ , UpperCamelCase_ ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = MaskFormerForInstanceSegmentation(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() def comm_check_on_output(UpperCAmelCase__ ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): A__ = model(pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ ) A__ = model(UpperCamelCase_ ) comm_check_on_output(UpperCamelCase_ ) A__ = model( pixel_values=UpperCamelCase_ , pixel_mask=UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ) comm_check_on_output(UpperCamelCase_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class UpperCamelCase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): lowerCAmelCase : Any = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () lowerCAmelCase : Any = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) lowerCAmelCase : List[str] = False lowerCAmelCase : Optional[int] = False lowerCAmelCase : Optional[Any] = False lowerCAmelCase : Tuple = False def __A ( self ): A__ = MaskFormerModelTester(self ) A__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ ) def __A ( self ): self.config_tester.run_common_tests() def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCamelCase_ ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def __A ( self ): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def __A ( self ): pass @unittest.skip(reason="MaskFormer is not a generative model" ) def __A ( self ): pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def __A ( self ): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __A ( self ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." 
) def __A ( self ): pass def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase_ ) A__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_ ) @slow def __A ( self ): for model_name in ["facebook/maskformer-swin-small-coco"]: A__ = MaskFormerModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) def __A ( self ): A__ = (self.model_tester.min_size,) * 2 A__ = { "pixel_values": torch.randn((2, 3, *size) , device=UpperCamelCase_ ), "mask_labels": torch.randn((2, 10, *size) , device=UpperCamelCase_ ), "class_labels": torch.zeros(2 , 10 , device=UpperCamelCase_ ).long(), } A__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(UpperCamelCase_ ) A__ = model(**UpperCamelCase_ ) self.assertTrue(outputs.loss is not None ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(UpperCamelCase_ , **UpperCamelCase_ , output_hidden_states=UpperCamelCase_ ) def __A ( self ): A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase_ ).to(UpperCamelCase_ ) A__ = model(**UpperCamelCase_ , output_attentions=UpperCamelCase_ ) self.assertTrue(outputs.attentions is not None ) def __A ( self ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss A__ = self.all_model_classes[1] A__ = self.model_tester.prepare_config_and_inputs() A__ = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.train() A__ = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ).loss loss.backward() def __A ( self ): A__ = self.all_model_classes[1] A__ = self.model_tester.prepare_config_and_inputs() A__ = True A__ = True A__ = model_class(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.train() A__ = model(UpperCamelCase_ , mask_labels=UpperCamelCase_ , class_labels=UpperCamelCase_ ) A__ = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() A__ = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't A__ = outputs.transformer_decoder_hidden_states[0] transformer_decoder_hidden_states.retain_grad() A__ = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=UpperCamelCase_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) UpperCAmelCase_ : Any = 1e-4 def UpperCamelCase ( )-> Any: """simple docstring""" A__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class UpperCamelCase ( unittest.TestCase ): @cached_property def __A ( self ): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def __A ( self ): A__ = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(UpperCamelCase_ ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ ) A__ = 
inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): A__ = model(**UpperCamelCase_ ) A__ = torch.tensor( [[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) A__ = torch.tensor( [[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) A__ = torch.tensor( [[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(UpperCamelCase_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) def __A ( self ): A__ = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(UpperCamelCase_ ) .eval() ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ ) A__ = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): A__ = model(**UpperCamelCase_ ) # masks_queries_logits A__ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) A__ = [ [-1.3_737_124, -1.7_724_937, -1.9_364_233], [-1.5_977_281, -1.9_867_939, -2.1_523_695], [-1.5_795_398, -1.9_269_832, -2.093_942], ] A__ = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) # class_queries_logits A__ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) A__ = torch.tensor( [ [1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0], [3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0], [1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0], ] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) def __A ( self ): A__ = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(UpperCamelCase_ ) .eval() ) A__ = self.default_image_processor A__ = prepare_img() A__ = image_processor(UpperCamelCase_ , return_tensors="pt" ).to(UpperCamelCase_ ) A__ = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 ) # check size self.assertEqual(UpperCamelCase_ , (1, 3, 800, 1_088) ) with torch.no_grad(): A__ = model(**UpperCamelCase_ ) # masks_queries_logits A__ = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) A__ = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]] A__ = torch.tensor(UpperCamelCase_ ).to(UpperCamelCase_ ) 
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) # class_queries_logits A__ = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) A__ = torch.tensor( [[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(UpperCamelCase_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCamelCase_ , atol=UpperCamelCase_ ) ) def __A ( self ): A__ = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(UpperCamelCase_ ) .eval() ) A__ = self.default_image_processor A__ = image_processor( [np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , ) A__ = inputs["pixel_values"].to(UpperCamelCase_ ) A__ = [el.to(UpperCamelCase_ ) for el in inputs["mask_labels"]] A__ = [el.to(UpperCamelCase_ ) for el in inputs["class_labels"]] with torch.no_grad(): A__ = model(**UpperCamelCase_ ) self.assertTrue(outputs.loss is not None )
491
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __UpperCAmelCase : Optional[Any] = TapasConfig.from_json_file(UpperCamelCase ) # set absolute/relative position embeddings parameter __UpperCAmelCase : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WTQ": # run_task_main.py hparams __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : Any = True # hparam_utils.py hparams __UpperCAmelCase : Union[str, Any] = 0.664694 __UpperCAmelCase : Union[str, Any] = 0.207951 __UpperCAmelCase : int = 0.121194 __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[str] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[str] = 0.0352513 __UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __UpperCAmelCase : int = 4 __UpperCAmelCase : Optional[int] = False # hparam_utils.py hparams __UpperCAmelCase : int = 36.4519 __UpperCAmelCase : str = 0.903421 __UpperCAmelCase : Dict = 222.088 __UpperCAmelCase : Dict = True __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Any = 0.763141 __UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "TABFACT": __UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase ) elif task == "MLM": __UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": __UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase ) else: raise ValueError(f"Task {task} not supported." ) print(f"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(UpperCamelCase ) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}" ) __UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(UpperCamelCase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. 
Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
77
0
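For context, a minimal sketch of how the TAPAS conversion routine above could be invoked directly instead of through argparse; every path below is a hypothetical placeholder, not a file referenced anywhere in this corpus.

# Usage sketch for the conversion function defined above; all paths are placeholders.
convert_tf_checkpoint_to_pytorch(
    task="WTQ",
    reset_position_index_per_cell=True,
    tf_checkpoint_path="checkpoints/tapas_wtq/model.ckpt",  # placeholder
    tapas_config_file="checkpoints/tapas_wtq/config.json",  # placeholder
    pytorch_dump_path="tapas_wtq_pytorch",                  # placeholder output dir
)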
import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class UpperCAmelCase_ : def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=7 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=False , _lowerCAmelCase=True , _lowerCAmelCase=99 , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=512 , _lowerCAmelCase=16 , _lowerCAmelCase=2 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=3 , _lowerCAmelCase=4 , _lowerCAmelCase=None , ): UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : Tuple = is_training UpperCAmelCase__ : Optional[int] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Optional[Any] = use_labels UpperCAmelCase__ : Dict = vocab_size UpperCAmelCase__ : Dict = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : Any = num_attention_heads UpperCAmelCase__ : int = intermediate_size UpperCAmelCase__ : Dict = hidden_act UpperCAmelCase__ : Any = hidden_dropout_prob UpperCAmelCase__ : Dict = attention_probs_dropout_prob UpperCAmelCase__ : Dict = max_position_embeddings UpperCAmelCase__ : str = type_vocab_size UpperCAmelCase__ : Optional[Any] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Optional[Any] = scope def __UpperCAmelCase ( self ): UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : Optional[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self ): return LlamaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): UpperCAmelCase__ : Optional[Any] = LlamaModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) UpperCAmelCase__ : Dict = model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): UpperCAmelCase__ : Dict = True UpperCAmelCase__ : List[str] = LlamaModel(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Any = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) UpperCAmelCase__ : List[Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , ) UpperCAmelCase__ : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): UpperCAmelCase__ : List[Any] = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ): UpperCAmelCase__ : Dict = True UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : List[str] = LlamaForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # first forward pass UpperCAmelCase__ : Union[str, Any] = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ , ) UpperCAmelCase__ : Optional[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase__ : List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase__ : Dict = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase__ : Tuple = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["hidden_states"][0] UpperCAmelCase__ : int = model( UpperCamelCase_ , attention_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ , output_hidden_states=UpperCamelCase_ , )["hidden_states"][0] # 
select random slice UpperCAmelCase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase__ : Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase__ : Tuple = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-3 ) ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() ( UpperCAmelCase__ ) : Any = config_and_inputs UpperCAmelCase__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class UpperCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ): __lowerCamelCase = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () __lowerCamelCase = (LlamaForCausalLM,) if is_torch_available() else () __lowerCamelCase = ( { 'feature-extraction': LlamaModel, 'text-classification': LlamaForSequenceClassification, 'text-generation': LlamaForCausalLM, 'zero-shot': LlamaForSequenceClassification, } if is_torch_available() else {} ) __lowerCamelCase = False __lowerCamelCase = False def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[int] = LlamaModelTester(self ) UpperCAmelCase__ : str = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 ) def __UpperCAmelCase ( self ): self.config_tester.run_common_tests() def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Optional[Any] = type self.model_tester.create_and_check_model(*UpperCamelCase_ ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = 3 UpperCAmelCase__ : List[Any] = input_dict["input_ids"] UpperCAmelCase__ : Tuple = input_ids.ne(1 ).to(UpperCamelCase_ ) UpperCAmelCase__ : List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase__ : Optional[Any] = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Dict = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = 3 UpperCAmelCase__ : Optional[Any] = "single_label_classification" UpperCAmelCase__ : str = input_dict["input_ids"] UpperCAmelCase__ : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ ) UpperCAmelCase__ : List[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) UpperCAmelCase__ : str = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Any = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def __UpperCAmelCase 
( self ): UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = 3 UpperCAmelCase__ : Tuple = "multi_label_classification" UpperCAmelCase__ : Union[str, Any] = input_dict["input_ids"] UpperCAmelCase__ : List[str] = input_ids.ne(1 ).to(UpperCamelCase_ ) UpperCAmelCase__ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) UpperCAmelCase__ : Any = LlamaForSequenceClassification(UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def __UpperCAmelCase ( self ): pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __UpperCAmelCase ( self , _lowerCAmelCase ): UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : str = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase__ : int = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase__ : Any = LlamaModel(UpperCamelCase_ ) original_model.to(UpperCamelCase_ ) original_model.eval() UpperCAmelCase__ : Any = original_model(UpperCamelCase_ ).last_hidden_state UpperCAmelCase__ : Tuple = original_model(UpperCamelCase_ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase__ : str = {"type": scaling_type, "factor": 1_0.0} UpperCAmelCase__ : Union[str, Any] = LlamaModel(UpperCamelCase_ ) scaled_model.to(UpperCamelCase_ ) scaled_model.eval() UpperCAmelCase__ : Optional[Any] = scaled_model(UpperCamelCase_ ).last_hidden_state UpperCAmelCase__ : Tuple = scaled_model(UpperCamelCase_ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. 
if scaling_type == "dynamic": self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1e-5 ) ) @require_torch class UpperCAmelCase_ ( unittest.TestCase ): @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __UpperCAmelCase ( self ): UpperCAmelCase__ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338] UpperCAmelCase__ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" , device_map="""auto""" ) UpperCAmelCase__ : Optional[Any] = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 UpperCAmelCase__ : Union[str, Any] = torch.tensor([[-6.6_5_5_0, -4.1_2_2_7, -4.9_8_5_9, -3.2_4_0_6, 0.8_2_6_2, -3.0_0_3_3, 1.2_9_6_4, -3.3_6_9_9]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCAmelCase__ : Dict = torch.tensor([-1_2.8_2_8_1, -7.4_4_5_3, -0.4_6_3_9, -8.0_6_2_5, -7.2_5_0_0, -8.0_0_0_0, -6.4_8_8_3, -7.7_6_9_5, -7.8_4_3_8, -7.0_3_1_2, -6.2_1_8_8, -7.1_3_2_8, -1.8_4_9_6, 1.9_9_6_1, -8.6_2_5_0, -6.7_2_2_7, -1_2.8_2_8_1, -6.9_4_9_2, -7.0_7_4_2, -7.7_8_5_2, -7.5_8_2_0, -7.9_0_6_2, -6.9_3_7_5, -7.9_8_0_5, -8.3_4_3_8, -8.1_5_6_2, -8.0_4_6_9, -7.6_2_5_0, -7.7_4_2_2, -7.3_3_9_8,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __UpperCAmelCase ( self ): UpperCAmelCase__ : List[Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] UpperCAmelCase__ : List[Any] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" , device_map="""auto""" ) UpperCAmelCase__ : Any = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 UpperCAmelCase__ : int = torch.tensor([[-2.0_6_2_2, -1.2_7_9_4, -1.1_6_3_8, -0.9_7_8_8, -1.4_6_0_3, -1.0_2_3_8, -1.7_8_9_3, -1.4_4_1_1]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCAmelCase__ : str = torch.tensor([-8.1_4_0_6, -8.0_5_4_7, 2.7_4_6_1, -1.2_3_4_4, -0.1_4_4_8, -1.8_2_6_2, -1.0_0_2_0, -1.8_1_5_4, -1.6_8_9_5, -1.8_5_1_6, -2.3_5_7_4, -0.9_2_7_7, 3.7_5_9_8, 6.5_7_4_2, -1.2_9_9_8, -0.1_1_7_7, -8.1_4_0_6, -2.9_6_8_8, -2.9_1_9_9, -3.1_6_9_9, -3.5_2_5_4, -2.3_5_5_5, -2.7_9_8_8, -3.4_1_4_1, -2.8_2_6_2, -4.5_1_9_5, -3.3_3_7_9, -3.3_1_6_4, -2.7_8_3_2, -3.0_2_7_3] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def __UpperCAmelCase ( self ): UpperCAmelCase__ : str = [1, 306, 4658, 278, 6593, 310, 2834, 338] UpperCAmelCase__ : Any = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" , device_map="""auto""" ) UpperCAmelCase__ : Optional[Any] = model(torch.tensor(UpperCamelCase_ ) ) # Expected mean on dim = -1 UpperCAmelCase__ : int = torch.tensor([[-0.8_5_6_2, -1.8_5_2_0, -0.7_5_5_1, -0.4_1_6_2, -1.5_1_6_1, -1.2_0_3_8, -2.4_8_2_3, -2.3_2_5_4]] ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off UpperCAmelCase__ : Optional[Any] = 
torch.tensor([-2.2_2_2_7, 4.8_8_2_8, 0.9_0_2_3, -0.4_5_7_8, -0.7_8_7_1, -0.1_0_3_3, -0.6_2_2_1, -0.5_7_8_6, -0.7_8_0_3, -1.0_6_7_4, -1.2_9_2_0, -0.1_5_7_0, 0.8_0_0_8, 2.0_7_2_3, -0.9_4_9_7, 0.2_7_7_1, -2.2_2_2_7, -0.7_6_1_2, -1.4_3_4_6, -1.2_0_6_1, -1.6_4_2_6, -0.3_0_0_0, -0.7_1_3_9, -1.1_9_3_4, -1.8_6_9_1, -1.6_9_7_3, -1.5_9_4_7, -1.2_7_0_5, -0.3_5_2_3, -0.5_5_1_3] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test""" ) @slow def __UpperCAmelCase ( self ): UpperCAmelCase__ : Tuple = [1, 306, 4658, 278, 6593, 310, 2834, 338] UpperCAmelCase__ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" , device_map="""auto""" ) UpperCAmelCase__ : Tuple = model(torch.tensor(UpperCamelCase_ ) ) UpperCAmelCase__ : Optional[int] = torch.tensor( [[-4.2_3_2_7, -3.3_3_6_0, -4.6_6_6_5, -4.7_6_3_1, -1.8_1_8_0, -3.4_1_7_0, -1.4_2_1_1, -3.1_8_1_0]] , dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) , UpperCamelCase_ , atol=1e-2 , rtol=1e-2 ) # fmt: off UpperCAmelCase__ : List[Any] = torch.tensor([-9.4_9_2_2, -3.9_5_5_1, 1.7_9_9_8, -5.6_7_5_8, -5.1_0_5_5, -5.8_9_8_4, -4.8_3_2_0, -6.8_0_8_6, -6.5_3_9_1, -5.6_1_7_2, -5.5_8_2_0, -5.5_3_5_2, 1.7_8_8_1, 3.6_2_8_9, -6.5_1_1_7, -3.4_7_8_5, -9.5_0_0_0, -6.0_3_5_2, -6.8_1_2_5, -6.0_1_9_5, -6.6_8_3_6, -5.4_7_2_7, -6.2_8_1_2, -6.0_3_9_1, -7.3_3_9_8, -7.4_2_9_7, -7.4_8_4_4, -6.5_8_2_0, -5.8_7_8_9, -5.5_3_1_2] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] , UpperCamelCase_ , atol=1e-5 , rtol=1e-5 ) @unittest.skip("""Model is curently gated""" ) @slow def __UpperCAmelCase ( self ): UpperCAmelCase__ : Dict = "Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi" UpperCAmelCase__ : int = "Simply put, the theory of relativity states that " UpperCAmelCase__ : Union[str, Any] = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) UpperCAmelCase__ : str = tokenizer.encode(UpperCamelCase_ , return_tensors="""pt""" ) UpperCAmelCase__ : int = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" , device_map="""sequential""" , use_safetensors=UpperCamelCase_ ) # greedy generation outputs UpperCAmelCase__ : int = model.generate(UpperCamelCase_ , max_new_tokens=64 , top_p=UpperCamelCase_ , temperature=1 , do_sample=UpperCamelCase_ ) UpperCAmelCase__ : List[Any] = tokenizer.decode(generated_ids[0] , skip_special_tokens=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
79
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None: """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __UpperCAmelCase : Optional[Any] = v.half() if save_path is None: # overwrite src_path __UpperCAmelCase : str = src_path torch.save(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
77
0
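A short usage sketch for the FP16 converter above, assuming a state-dict checkpoint exists at the placeholder path; the CLI form goes through the fire.Fire(convert) entry point the script already defines.

# Direct call; "pytorch_model.bin" is a placeholder filename echoing the
# error message in the script above.
convert("pytorch_model.bin", map_location="cpu", save_path="pytorch_model.fp16.bin")

# Roughly equivalent CLI invocation via fire (the script filename is hypothetical):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model.fp16.bin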
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _UpperCAmelCase ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any=13 , UpperCAmelCase : Optional[int]=3 , UpperCAmelCase : int=2_24 , UpperCAmelCase : int=30 , UpperCAmelCase : str=4_00 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[Any]=[0.5, 0.5, 0.5] , ): SCREAMING_SNAKE_CASE_ :Tuple = size if size is not None else {"height": 18, "width": 18} SCREAMING_SNAKE_CASE_ :List[Any] = parent SCREAMING_SNAKE_CASE_ :Tuple = batch_size SCREAMING_SNAKE_CASE_ :Tuple = num_channels SCREAMING_SNAKE_CASE_ :List[Any] = image_size SCREAMING_SNAKE_CASE_ :str = min_resolution SCREAMING_SNAKE_CASE_ :Tuple = max_resolution SCREAMING_SNAKE_CASE_ :Optional[Any] = do_resize SCREAMING_SNAKE_CASE_ :Any = size SCREAMING_SNAKE_CASE_ :Any = do_normalize SCREAMING_SNAKE_CASE_ :Any = image_mean SCREAMING_SNAKE_CASE_ :Optional[Any] = image_std def _snake_case ( self : str): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class _UpperCAmelCase ( lowercase , unittest.TestCase ): lowerCamelCase_ : int = ViTImageProcessor if is_vision_available() else None def _snake_case ( self : Any): SCREAMING_SNAKE_CASE_ :Optional[Any] = EfficientFormerImageProcessorTester(self) @property def _snake_case ( self : Union[str, Any]): return self.image_proc_tester.prepare_image_processor_dict() def _snake_case ( self : Tuple): SCREAMING_SNAKE_CASE_ :Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def _snake_case ( self : Dict): pass def _snake_case ( self : Tuple): SCREAMING_SNAKE_CASE_ :Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images SCREAMING_SNAKE_CASE_ :str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input SCREAMING_SNAKE_CASE_ :Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE_ :Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def _snake_case ( self : Tuple): SCREAMING_SNAKE_CASE_ :int = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors 
SCREAMING_SNAKE_CASE_ :Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input SCREAMING_SNAKE_CASE_ :Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE_ :Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def _snake_case ( self : Any): SCREAMING_SNAKE_CASE_ :Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors SCREAMING_SNAKE_CASE_ :Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input SCREAMING_SNAKE_CASE_ :Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched SCREAMING_SNAKE_CASE_ :Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
631
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
0
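As a quick illustration of what the image-processor test above exercises, a minimal sketch of running ViTImageProcessor on one random image; the 18x18 size mirrors the tester's default, everything else here is illustrative.

import numpy as np
from transformers import ViTImageProcessor

# Same size dict the tester above defaults to.
image_processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})

# A random HxWxC uint8 array stands in for a real image.
image = np.random.randint(0, 256, size=(30, 40, 3), dtype=np.uint8)
encoding = image_processor(image, return_tensors="pt")
print(encoding["pixel_values"].shape)  # expected: torch.Size([1, 3, 18, 18])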
import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Dict = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear", "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed", "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "ctc_proj", "mask_emb": "masked_spec_embed", } SCREAMING_SNAKE_CASE__ : Tuple = [ "ctc_proj", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: '''simple docstring''' for attribute in key.split('.' ): SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: SCREAMING_SNAKE_CASE_ = hf_pointer.shape assert hf_shape == value.shape, ( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": SCREAMING_SNAKE_CASE_ = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE_ = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE_ = value elif weight_type == "bias": SCREAMING_SNAKE_CASE_ = value else: SCREAMING_SNAKE_CASE_ = value logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = fairseq_model.state_dict() SCREAMING_SNAKE_CASE_ = hf_model.feature_extractor for name, value in fairseq_dict.items(): SCREAMING_SNAKE_CASE_ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) SCREAMING_SNAKE_CASE_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: SCREAMING_SNAKE_CASE_ = True if "*" in mapped_key: SCREAMING_SNAKE_CASE_ = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2] SCREAMING_SNAKE_CASE_ = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "weight_g" in name: SCREAMING_SNAKE_CASE_ = "weight_g" elif "weight_v" in name: SCREAMING_SNAKE_CASE_ = "weight_v" elif "bias" in name and "relative_attention_bias" not in name: SCREAMING_SNAKE_CASE_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj SCREAMING_SNAKE_CASE_ = "weight" else: SCREAMING_SNAKE_CASE_ = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F'Unused weights: {unused_weights}' ) def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = full_name.split('conv_layers.' )[-1] SCREAMING_SNAKE_CASE_ = name.split('.' ) SCREAMING_SNAKE_CASE_ = int(items[0] ) SCREAMING_SNAKE_CASE_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'{full_name} has size {value.shape}, but' F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = torch.load(SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = WavLMConfigOrig(checkpoint['cfg'] ) SCREAMING_SNAKE_CASE_ = WavLMOrig(SCREAMING_SNAKE_CASE ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: SCREAMING_SNAKE_CASE_ = WavLMConfig.from_pretrained(SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = WavLMConfig() SCREAMING_SNAKE_CASE_ = WavLMModel(SCREAMING_SNAKE_CASE ) recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) hf_wavlm.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
205
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = 
tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = 
self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
0
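The subtlest step in the WavLM conversion above is the wildcard in the MAPPING values, where "*" is replaced by a layer index parsed out of the fairseq parameter name; below is a self-contained sketch of that substitution logic (the example parameter name is illustrative, not taken from a real checkpoint).

# Sketch of the "*" substitution performed by the weight-mapping loop above.
MAPPING_EXAMPLE = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}


def map_fairseq_name(name: str) -> str:
    for key, mapped_key in MAPPING_EXAMPLE.items():
        if key in name:
            # The layer index sits just before the matched key in the fairseq name.
            layer_index = name.split(key)[0].split(".")[-2]
            return mapped_key.replace("*", layer_index)
    return name


print(map_fairseq_name("encoder.layers.7.self_attn.k_proj.weight"))
# -> encoder.layers.7.attention.k_proj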
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        '''simple docstring'''
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
481
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
0
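To make the special-token layout in the CamemBERT tokenizer's build_inputs_with_special_tokens above concrete, here is a tiny sketch with stand-in token ids; the ids are illustrative, only the list arithmetic mirrors the method.

# cls/sep stand-ins; all ids here are illustrative.
cls, sep = [0], [2]
token_ids_0 = [10, 11, 12]
token_ids_1 = [20, 21]

single = cls + token_ids_0 + sep                           # <s> A </s>
pair = cls + token_ids_0 + sep + sep + token_ids_1 + sep   # <s> A </s></s> B </s>
print(single)  # [0, 10, 11, 12, 2]
print(pair)    # [0, 10, 11, 12, 2, 2, 20, 21, 2]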
from collections import namedtuple

from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
635
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
0
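Finally, a short usage sketch for the volume converter above; the expected numbers follow directly from the conversion table (cubicmeter: from_=1, litre: to=1000).

# 4 cubic meters in litres: 4 * 1 * 1000
print(volume_conversion(4, "cubicmeter", "litre"))     # 4000.0
# and back again: 4000 * 0.001 * 1
print(volume_conversion(4000, "litre", "cubicmeter"))  # 4.0

# Unsupported units raise ValueError listing the supported keys:
# volume_conversion(1, "pint", "litre")  # -> ValueError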
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging snake_case__ : int = logging.get_logger(__name__) snake_case__ : str = { 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json', # See all BART models at https://huggingface.co/models?filter=bart } class _a ( A__ ): """simple docstring""" snake_case ="""bart""" snake_case =["""past_key_values"""] snake_case ={"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""} def __init__( self , _snake_case=5_0265 , _snake_case=1024 , _snake_case=12 , _snake_case=4096 , _snake_case=16 , _snake_case=12 , _snake_case=4096 , _snake_case=16 , _snake_case=0.0 , _snake_case=0.0 , _snake_case="gelu" , _snake_case=1024 , _snake_case=0.1 , _snake_case=0.0 , _snake_case=0.0 , _snake_case=0.02 , _snake_case=0.0 , _snake_case=False , _snake_case=True , _snake_case=3 , _snake_case=1 , _snake_case=0 , _snake_case=2 , _snake_case=True , _snake_case=2 , _snake_case=2 , **_snake_case , ): _UpperCAmelCase =vocab_size _UpperCAmelCase =max_position_embeddings _UpperCAmelCase =d_model _UpperCAmelCase =encoder_ffn_dim _UpperCAmelCase =encoder_layers _UpperCAmelCase =encoder_attention_heads _UpperCAmelCase =decoder_ffn_dim _UpperCAmelCase =decoder_layers _UpperCAmelCase =decoder_attention_heads _UpperCAmelCase =dropout _UpperCAmelCase =attention_dropout _UpperCAmelCase =activation_dropout _UpperCAmelCase =activation_function _UpperCAmelCase =init_std _UpperCAmelCase =encoder_layerdrop _UpperCAmelCase =decoder_layerdrop _UpperCAmelCase =classifier_dropout _UpperCAmelCase =use_cache _UpperCAmelCase =encoder_layers _UpperCAmelCase =scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , is_encoder_decoder=UpperCamelCase_ , decoder_start_token_id=UpperCamelCase_ , forced_eos_token_id=UpperCamelCase_ , **UpperCamelCase_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCamelCase_ ): _UpperCAmelCase =self.bos_token_id warnings.warn( F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. " "The config can simply be saved and uploaded again to be fixed." ) class _a ( A__ ): """simple docstring""" @property def SCREAMING_SNAKE_CASE ( self ): if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase =OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _UpperCAmelCase ={0: "batch"} _UpperCAmelCase ={0: "batch", 1: "past_decoder_sequence + sequence"} else: _UpperCAmelCase ={0: "batch", 1: "decoder_sequence"} _UpperCAmelCase ={0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(UpperCamelCase_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
_UpperCAmelCase =OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: _UpperCAmelCase =self.num_layers for i in range(UpperCamelCase_ ): _UpperCAmelCase ={0: "batch", 2: "past_sequence + sequence"} _UpperCAmelCase ={0: "batch", 2: "past_sequence + sequence"} else: _UpperCAmelCase =OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def SCREAMING_SNAKE_CASE ( self ): if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase =super().outputs else: _UpperCAmelCase =super(UpperCamelCase_ , self ).outputs if self.use_past: _UpperCAmelCase =self.num_layers for i in range(UpperCamelCase_ ): _UpperCAmelCase ={0: "batch", 2: "past_sequence + sequence"} _UpperCAmelCase ={0: "batch", 2: "past_sequence + sequence"} return common_outputs def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ): _UpperCAmelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # Generate decoder inputs _UpperCAmelCase =seq_length if not self.use_past else 1 _UpperCAmelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) _UpperCAmelCase ={F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} _UpperCAmelCase =dict(**UpperCamelCase_ , **UpperCamelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _UpperCAmelCase =common_inputs["input_ids"].shape _UpperCAmelCase =common_inputs["decoder_input_ids"].shape[1] _UpperCAmelCase =self.num_attention_heads _UpperCAmelCase =( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) _UpperCAmelCase =decoder_seq_length + 3 _UpperCAmelCase =( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) _UpperCAmelCase =torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(UpperCamelCase_ , UpperCamelCase_ )] , dim=1 ) _UpperCAmelCase =[] # If the number of encoder and decoder layers are present in the model configuration, both are considered _UpperCAmelCase =self.num_layers _UpperCAmelCase =min(UpperCamelCase_ , UpperCamelCase_ ) _UpperCAmelCase =max(UpperCamelCase_ , UpperCamelCase_ ) - min_num_layers _UpperCAmelCase ="encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(UpperCamelCase_ ): common_inputs["past_key_values"].append( ( torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ ), ) ) # TODO: test this. 
_UpperCAmelCase =encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(UpperCamelCase_ , UpperCamelCase_ ): common_inputs["past_key_values"].append((torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) ) return common_inputs def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ): _UpperCAmelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch _UpperCAmelCase =common_inputs["input_ids"].shape # Not using the same length for past_key_values _UpperCAmelCase =seqlen + 2 _UpperCAmelCase =self.num_layers _UpperCAmelCase =self.num_attention_heads _UpperCAmelCase =( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) _UpperCAmelCase =common_inputs["attention_mask"].dtype _UpperCAmelCase =torch.cat( [common_inputs["attention_mask"], torch.ones(UpperCamelCase_ , UpperCamelCase_ , dtype=UpperCamelCase_ )] , dim=1 ) _UpperCAmelCase =[ (torch.zeros(UpperCamelCase_ ), torch.zeros(UpperCamelCase_ )) for _ in range(UpperCamelCase_ ) ] return common_inputs def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ): _UpperCAmelCase =compute_effective_axis_dimension( UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX _UpperCAmelCase =tokenizer.num_special_tokens_to_add(UpperCamelCase_ ) _UpperCAmelCase =compute_effective_axis_dimension( UpperCamelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCamelCase_ ) # Generate dummy inputs according to compute batch and sequence _UpperCAmelCase =[" ".join([tokenizer.unk_token] ) * seq_length] * batch_size _UpperCAmelCase =dict(tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ ) ) return common_inputs def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case = -1 , _snake_case = -1 , _snake_case = False , _snake_case = None , ): if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase =self._generate_dummy_inputs_for_default_and_seqaseq_lm( UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ ) elif self.task == "causal-lm": _UpperCAmelCase =self._generate_dummy_inputs_for_causal_lm( UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ ) else: _UpperCAmelCase =self._generate_dummy_inputs_for_sequence_classification_and_question_answering( UpperCamelCase_ , batch_size=UpperCamelCase_ , seq_length=UpperCamelCase_ , is_pair=UpperCamelCase_ , framework=UpperCamelCase_ ) return common_inputs def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case ): if self.task in ["default", "seq2seq-lm"]: _UpperCAmelCase =super()._flatten_past_key_values_(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) else: _UpperCAmelCase =super(UpperCamelCase_ , self )._flatten_past_key_values_( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
408
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
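The snippet above mirrors transformers' BartConfig and its ONNX export config. Assuming that class, a small sketch of the backward-compatibility shim in __init__, which remaps the deprecated force_bos_token_to_be_generated kwarg onto forced_bos_token_id:

from transformers import BartConfig

# Legacy BART-CNN style kwarg is remapped (with a warning) onto forced_bos_token_id:
legacy = BartConfig(force_bos_token_to_be_generated=True)
assert legacy.forced_bos_token_id == legacy.bos_token_id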
def optimal_merge_pattern(files: list) -> float:
    """Merge a list of file sizes two at a time, always taking the two smallest,
    and return the total (minimum) merge cost.

    >>> optimal_merge_pattern([2, 3, 4])
    14
    """
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Merge the two files with the smallest sizes
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
57
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed 
__UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
0
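A worked check of optimal_merge_pattern above: merging [2, 3, 4] combines 2+3 at cost 5, then 5+4 at cost 9, 14 in total. The function consumes its argument in place (it pops and appends), so pass a copy if the list is still needed:

sizes = [2, 3, 4]
assert optimal_merge_pattern(list(sizes)) == 14
assert sizes == [2, 3, 4]  # original preserved because a copy was merged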
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE__ = { "configuration_owlvit": [ "OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OwlViTConfig", "OwlViTOnnxConfig", "OwlViTTextConfig", "OwlViTVisionConfig", ], "processing_owlvit": ["OwlViTProcessor"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = ["OwlViTFeatureExtractor"] SCREAMING_SNAKE_CASE__ = ["OwlViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ = [ "OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "OwlViTModel", "OwlViTPreTrainedModel", "OwlViTTextModel", "OwlViTVisionModel", "OwlViTForObjectDetection", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
267
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
0
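A quick sanity check of the counting functions above: for three nodes, catalan_number(3) = C(6, 3) // 4 = 20 // 4 = 5 binary search trees, and binary_tree_count(3) = 5 * 3! = 30 labeled binary trees:

assert catalan_number(3) == 5
assert factorial(3) == 6
assert binary_tree_count(3) == 30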
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: SCREAMING_SNAKE_CASE__ : str =None SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[int] ='▁' SCREAMING_SNAKE_CASE__ : Union[str, Any] ={'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} SCREAMING_SNAKE_CASE__ : List[Any] ={ 'vocab_file': {'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'}, 'tokenizer_file': { 'google/pegasus-xsum': 'https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json' }, } SCREAMING_SNAKE_CASE__ : Union[str, Any] ={ 'google/pegasus-xsum': 512, } class _UpperCAmelCase ( a_ ): """simple docstring""" __snake_case = VOCAB_FILES_NAMES __snake_case = PRETRAINED_VOCAB_FILES_MAP __snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __snake_case = PegasusTokenizer __snake_case = ["""input_ids""", """attention_mask"""] def __init__( self , _lowercase=None , _lowercase=None , _lowercase="<pad>" , _lowercase="</s>" , _lowercase="<unk>" , _lowercase="<mask_2>" , _lowercase="<mask_1>" , _lowercase=None , _lowercase=103 , **_lowercase , ) -> Optional[int]: _lowerCamelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_ ): raise TypeError( F'''additional_special_tokens should be of type {type(UpperCamelCase_ )}, but is''' F''' {type(UpperCamelCase_ )}''' ) _lowerCamelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F'''<unk_{i}>''' for i in range(len(UpperCamelCase_ ) , self.offset - 1 ) ] if len(set(UpperCamelCase_ ) ) != len(UpperCamelCase_ ): raise ValueError( '''Please make sure that the provided additional_special_tokens do not contain an incorrectly''' F''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) _lowerCamelCase : str = additional_special_tokens_extended else: _lowerCamelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) _lowerCamelCase : Optional[int] = vocab_file _lowerCamelCase : List[str] = False if not self.vocab_file else True def a__ ( self , _lowercase ) -> int: _lowerCamelCase : int = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( '''There should be 3 special tokens: mask_token, pad_token, and eos_token +''' F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def a__ ( self , _lowercase , _lowercase = None , _lowercase = False ) -> Tuple: if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_ ) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def a__ ( self , _lowercase , _lowercase=None ) -> str: if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a__ ( self , _lowercase , _lowercase = None ) -> List[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(UpperCamelCase_ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return _lowerCamelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ): copyfile(self.vocab_file , UpperCamelCase_ ) return (out_vocab_file,)
434
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
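A short usage sketch for the fast Pegasus tokenizer above. Per its build_inputs_with_special_tokens, encodings end with a single EOS token and get no BOS; this assumes the public google/pegasus-xsum checkpoint is reachable:

from transformers import PegasusTokenizerFast

tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
ids = tokenizer("Summarize this article.").input_ids
assert ids[-1] == tokenizer.eos_token_id  # single EOS appended, no BOS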
"""simple docstring""" from collections import Counter from pathlib import Path from typing import Optional, Tuple import yaml class lowerCamelCase ( yaml.SafeLoader ): '''simple docstring''' def _a (self , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : List[str] = [self.constructed_objects[key_node] for key_node, _ in node.value] UpperCAmelCase__ : List[str] = [tuple(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else key for key in keys] UpperCAmelCase__ : List[Any] = Counter(UpperCamelCase_ ) UpperCAmelCase__ : int = [key for key in counter if counter[key] > 1] if duplicate_keys: raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" ) def _a (self , _lowerCamelCase , _lowerCamelCase=False ): """simple docstring""" UpperCAmelCase__ : Tuple = super().construct_mapping(UpperCamelCase_ , deep=UpperCamelCase_ ) self._check_no_duplicates_on_constructed_node(UpperCamelCase_ ) return mapping def a__ ( lowerCAmelCase ) -> Tuple[Optional[str], str]: UpperCAmelCase__ : Dict = list(readme_content.splitlines() ) if full_content and full_content[0] == "---" and "---" in full_content[1:]: UpperCAmelCase__ : int = full_content[1:].index("""---""" ) + 1 UpperCAmelCase__ : Optional[Any] = "\n".join(full_content[1:sep_idx] ) return yamlblock, "\n".join(full_content[sep_idx + 1 :] ) return None, "\n".join(lowerCAmelCase ) class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = {'train_eval_index'} # train-eval-index in the YAML metadata @classmethod def _a (cls , _lowerCamelCase ): """simple docstring""" with open(UpperCamelCase_ , encoding="""utf-8""" ) as readme_file: UpperCAmelCase__ : Optional[Any] = _split_yaml_from_readme(readme_file.read() ) if yaml_string is not None: return cls.from_yaml_string(UpperCamelCase_ ) else: return cls() def _a (self , _lowerCamelCase ): """simple docstring""" if path.exists(): with open(UpperCamelCase_ , encoding="""utf-8""" ) as readme_file: UpperCAmelCase__ : Union[str, Any] = readme_file.read() else: UpperCAmelCase__ : Union[str, Any] = None UpperCAmelCase__ : int = self._to_readme(UpperCamelCase_ ) with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as readme_file: readme_file.write(UpperCamelCase_ ) def _a (self , _lowerCamelCase = None ): """simple docstring""" if readme_content is not None: UpperCAmelCase__ : List[str] = _split_yaml_from_readme(UpperCamelCase_ ) UpperCAmelCase__ : Dict = "---\n" + self.to_yaml_string() + "---\n" + content else: UpperCAmelCase__ : Dict = "---\n" + self.to_yaml_string() + "---\n" return full_content @classmethod def _a (cls , _lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Dict = yaml.load(UpperCamelCase_ , Loader=_NoDuplicateSafeLoader ) or {} # Convert the YAML keys to DatasetMetadata fields UpperCAmelCase__ : Dict = { (key.replace("""-""" , """_""" ) if key.replace("""-""" , """_""" ) in cls._FIELDS_WITH_DASHES else key): value for key, value in metadata_dict.items() } return cls(**UpperCamelCase_ ) def _a (self ): """simple docstring""" return yaml.safe_dump( { (key.replace("""_""" , """-""" ) if key in self._FIELDS_WITH_DASHES else key): value for key, value in self.items() } , sort_keys=UpperCamelCase_ , allow_unicode=UpperCamelCase_ , encoding="""utf-8""" , ).decode("""utf-8""" ) _A = { """image-classification""": [], """translation""": [], """image-segmentation""": [], """fill-mask""": [], """automatic-speech-recognition""": [], """token-classification""": [], """sentence-similarity""": [], """audio-classification""": [], 
"""question-answering""": [], """summarization""": [], """zero-shot-classification""": [], """table-to-text""": [], """feature-extraction""": [], """other""": [], """multiple-choice""": [], """text-classification""": [], """text-to-image""": [], """text2text-generation""": [], """zero-shot-image-classification""": [], """tabular-classification""": [], """tabular-regression""": [], """image-to-image""": [], """tabular-to-text""": [], """unconditional-image-generation""": [], """text-retrieval""": [], """text-to-speech""": [], """object-detection""": [], """audio-to-audio""": [], """text-generation""": [], """conversational""": [], """table-question-answering""": [], """visual-question-answering""": [], """image-to-text""": [], """reinforcement-learning""": [], """voice-activity-detection""": [], """time-series-forecasting""": [], """document-question-answering""": [], } if __name__ == "__main__": from argparse import ArgumentParser _A = ArgumentParser(usage="""Validate the yaml metadata block of a README.md file.""") ap.add_argument("""readme_filepath""") _A = ap.parse_args() _A = Path(args.readme_filepath) _A = DatasetMetadata.from_readme(readme_filepath) print(dataset_metadata) dataset_metadata.to_readme(readme_filepath)
182
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = 
self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
0
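The test class above pushes PIL, NumPy and torch inputs through the same processor call; a minimal standalone sketch with the tester's default 18x18 output size (assuming transformers' ViTImageProcessor, which the test uses):

import numpy as np
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
image = np.random.randint(0, 256, size=(30, 40, 3), dtype=np.uint8)  # channels-last dummy image
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)  # resized, normalized, batched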
import argparse import glob import logging import os import time from argparse import Namespace import numpy as np import torch from lightning_base import BaseTransformer, add_generic_args, generic_train from torch.utils.data import DataLoader, TensorDataset from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes, glue_tasks_num_labels from transformers import glue_processors as processors UpperCAmelCase_ : Optional[int] = logging.getLogger(__name__) class UpperCamelCase ( _UpperCAmelCase ): lowerCAmelCase : List[str] = """sequence-classification""" def __init__( self , UpperCAmelCase__ ): if type(UpperCamelCase_ ) == dict: A__ = Namespace(**UpperCamelCase_ ) A__ = glue_output_modes[hparams.task] A__ = glue_tasks_num_labels[hparams.task] super().__init__(UpperCamelCase_ , UpperCamelCase_ , self.mode ) def __A ( self , **UpperCAmelCase__ ): return self.model(**UpperCamelCase_ ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: A__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None A__ = self(**UpperCamelCase_ ) A__ = outputs[0] A__ = self.trainer.lr_schedulers[0]["scheduler"] A__ = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]} return {"loss": loss, "log": tensorboard_logs} def __A ( self ): A__ = self.hparams A__ = processors[args.task]() A__ = processor.get_labels() for mode in ["train", "dev"]: A__ = self._feature_file(UpperCamelCase_ ) if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache: logger.info("Loading features from cached file %s" , UpperCamelCase_ ) else: logger.info("Creating features from dataset file at %s" , args.data_dir ) A__ = ( processor.get_dev_examples(args.data_dir ) if mode == "dev" else processor.get_train_examples(args.data_dir ) ) A__ = convert_examples_to_features( UpperCamelCase_ , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , ) logger.info("Saving features into cached file %s" , UpperCamelCase_ ) torch.save(UpperCamelCase_ , UpperCamelCase_ ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = False ): A__ = "dev" if mode == "test" else mode A__ = self._feature_file(UpperCamelCase_ ) logger.info("Loading features from cached file %s" , UpperCamelCase_ ) A__ = torch.load(UpperCamelCase_ ) A__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long ) A__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long ) A__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long ) if self.hparams.glue_output_mode == "classification": A__ = torch.tensor([f.label for f in features] , dtype=torch.long ) elif self.hparams.glue_output_mode == "regression": A__ = torch.tensor([f.label for f in features] , dtype=torch.float ) return DataLoader( TensorDataset(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , batch_size=UpperCamelCase_ , shuffle=UpperCamelCase_ , ) def __A ( self , UpperCAmelCase__ , UpperCAmelCase__ ): A__ = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if self.config.model_type not in ["distilbert", "bart"]: A__ = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None A__ = self(**UpperCamelCase_ ) A__ = outputs[:2] A__ = 
logits.detach().cpu().numpy() A__ = inputs["labels"].detach().cpu().numpy() return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids} def __A ( self , UpperCAmelCase__ ): A__ = torch.stack([x["val_loss"] for x in outputs] ).mean().detach().cpu().item() A__ = np.concatenate([x["pred"] for x in outputs] , axis=0 ) if self.hparams.glue_output_mode == "classification": A__ = np.argmax(UpperCamelCase_ , axis=1 ) elif self.hparams.glue_output_mode == "regression": A__ = np.squeeze(UpperCamelCase_ ) A__ = np.concatenate([x["target"] for x in outputs] , axis=0 ) A__ = [[] for _ in range(out_label_ids.shape[0] )] A__ = [[] for _ in range(out_label_ids.shape[0] )] A__ = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task , UpperCamelCase_ , UpperCamelCase_ )} A__ = dict(results.items() ) A__ = results return ret, preds_list, out_label_list def __A ( self , UpperCAmelCase__ ): A__ = self._eval_end(UpperCamelCase_ ) A__ = ret["log"] return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs} def __A ( self , UpperCAmelCase__ ): A__ = self._eval_end(UpperCamelCase_ ) A__ = ret["log"] # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss` return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs} @staticmethod def __A ( UpperCAmelCase__ , UpperCAmelCase__ ): BaseTransformer.add_model_specific_args(UpperCamelCase_ , UpperCamelCase_ ) parser.add_argument( "--max_seq_length" , default=128 , type=UpperCamelCase_ , help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) , ) parser.add_argument( "--task" , default="" , type=UpperCamelCase_ , required=UpperCamelCase_ , help="The GLUE task to run" , ) parser.add_argument( "--gpus" , default=0 , type=UpperCamelCase_ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , ) parser.add_argument( "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" ) return parser def UpperCamelCase ( )-> Union[str, Any]: """simple docstring""" A__ = argparse.ArgumentParser() add_generic_args(_A , os.getcwd() ) A__ = GLUETransformer.add_model_specific_args(_A , os.getcwd() ) A__ = parser.parse_args() # If output_dir not provided, a folder will be generated in pwd if args.output_dir is None: A__ = os.path.join( "./results" , f"""{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}""" , ) os.makedirs(args.output_dir ) A__ = GLUETransformer(_A ) A__ = generic_train(_A , _A ) # Optionally, predict on dev set and write to output_dir if args.do_predict: A__ = sorted(glob.glob(os.path.join(args.output_dir , "checkpoint-epoch=*.ckpt" ) , recursive=_A ) ) A__ = model.load_from_checkpoint(checkpoints[-1] ) return trainer.test(_A ) if __name__ == "__main__": main()
491
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
0
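Conversions in the table above route through cubic metres, value * from_ * to, so 4 cubic metres in litres is 4 * 1 * 1000:

assert volume_conversion(4, "cubicmeter", "litre") == 4000
assert volume_conversion(1, "litre", "gallon") == 0.001 * 264.172  # ~0.264 US gallons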
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = { """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def _lowerCamelCase ( __lowerCamelCase ) -> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {} state_dict.pop("""pixel_mean""" , __lowerCamelCase ) state_dict.pop("""pixel_std""" , __lowerCamelCase ) UpperCAmelCase__ : Any = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: UpperCAmelCase__ : Union[str, Any] = key.replace(__lowerCamelCase , __lowerCamelCase ) if re.match(__lowerCamelCase , __lowerCamelCase ): UpperCAmelCase__ : int = int(re.match(__lowerCamelCase , __lowerCamelCase ).group(2 ) ) if layer_nb == 0: UpperCAmelCase__ : Dict = key.replace("""layers.0""" , """proj_in""" ) elif layer_nb == 1: UpperCAmelCase__ : int = key.replace("""layers.1""" , """layers.0""" ) elif layer_nb == 2: UpperCAmelCase__ : Union[str, Any] = key.replace("""layers.2""" , """proj_out""" ) UpperCAmelCase__ : Tuple = value UpperCAmelCase__ : Any = model_state_dict[ "prompt_encoder.shared_embedding.positional_embedding" ] return model_state_dict def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase="ybelkada/segment-anything" ) -> Dict: '''simple docstring''' UpperCAmelCase__ : int = hf_hub_download(__lowerCamelCase , F"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: UpperCAmelCase__ : List[Any] = SamConfig() elif "sam_vit_l" in model_name: UpperCAmelCase__ : int = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) UpperCAmelCase__ : Dict = SamConfig( vision_config=__lowerCamelCase , ) elif "sam_vit_h" in model_name: UpperCAmelCase__ : str = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) UpperCAmelCase__ : List[Any] = SamConfig( vision_config=__lowerCamelCase , ) UpperCAmelCase__ : str = torch.load(__lowerCamelCase , map_location="""cpu""" ) UpperCAmelCase__ : Optional[Any] = replace_keys(__lowerCamelCase ) UpperCAmelCase__ : Tuple = SamImageProcessor() UpperCAmelCase__ 
: Optional[Any] = SamProcessor(image_processor=__lowerCamelCase ) UpperCAmelCase__ : List[Any] = SamModel(__lowerCamelCase ) hf_model.load_state_dict(__lowerCamelCase ) UpperCAmelCase__ : Any = hf_model.to("""cuda""" ) UpperCAmelCase__ : Tuple = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png" UpperCAmelCase__ : int = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ).convert("""RGB""" ) UpperCAmelCase__ : List[Any] = [[[400, 650]]] UpperCAmelCase__ : str = [[1]] UpperCAmelCase__ : List[Any] = processor(images=np.array(__lowerCamelCase ) , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): UpperCAmelCase__ : int = hf_model(**__lowerCamelCase ) UpperCAmelCase__ : int = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_890_251_159_668 UpperCAmelCase__ : str = processor( images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): UpperCAmelCase__ : List[Any] = hf_model(**__lowerCamelCase ) UpperCAmelCase__ : int = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_712_603_092_193_604 UpperCAmelCase__ : Dict = ((75, 275, 1725, 850),) UpperCAmelCase__ : List[Any] = processor(images=np.array(__lowerCamelCase ) , input_boxes=__lowerCamelCase , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): UpperCAmelCase__ : Tuple = hf_model(**__lowerCamelCase ) UpperCAmelCase__ : int = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_686_015_605_926_514 # Test with 2 points and 1 image. UpperCAmelCase__ : List[str] = [[[400, 650], [800, 650]]] UpperCAmelCase__ : int = [[1, 1]] UpperCAmelCase__ : List[Any] = processor( images=np.array(__lowerCamelCase ) , input_points=__lowerCamelCase , input_labels=__lowerCamelCase , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): UpperCAmelCase__ : Optional[int] = hf_model(**__lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_936_047_792_434_692 if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() SCREAMING_SNAKE_CASE__ : str = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) SCREAMING_SNAKE_CASE__ : Dict = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
79
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = 
torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
0
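The SAM conversion script above is argparse-driven, but its entry point can also be called directly. This assumes the file is saved as convert_sam_to_hf.py (module name hypothetical); note the script moves the model to "cuda", so a GPU is required:

from convert_sam_to_hf import convert_sam_checkpoint  # hypothetical module name

convert_sam_checkpoint(
    "sam_vit_b_01ec64",  # model_name, one of the choices listed in the script
    "./sam-vit-base",  # pytorch_dump_folder_path
    False,  # push_to_hub
    "ybelkada/segment-anything",  # model_hub_id (the script's default)
)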
def solution(min_total: int = 10**12) -> int:
    """Return the number of blue discs in the first arrangement whose total
    number of discs exceeds min_total (Project Euler-style two-disc problem,
    solved by walking the Pell-equation solutions x^2 - 2*y^2 = -1).

    >>> solution(2)
    3
    >>> solution(100)
    85
    """
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f"{solution() = }")
631
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + 
chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
77
0
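The extractor record above supports two padding strategies for clips shorter than max_length (which is max_length_s * sampling_rate); the "repeatpad" branch is easy to miss inside the obfuscated names. A minimal standalone sketch of it, in plain numpy rather than the HF implementation:

import numpy as np

def repeat_pad(waveform: np.ndarray, max_length: int) -> np.ndarray:
    # Tile whole copies of the clip, then zero-fill whatever is left over.
    if waveform.shape[0] >= max_length:
        return waveform[:max_length]
    n_repeat = max_length // waveform.shape[0]
    tiled = np.tile(waveform, n_repeat)
    return np.pad(tiled, (0, max_length - tiled.shape[0]), mode="constant", constant_values=0)

print(repeat_pad(np.ones(3), 8))  # [1. 1. 1. 1. 1. 1. 0. 0.]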
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k", "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v", "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q", "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u", "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v", "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out", "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos", "self_attn.rotary_emb": "encoder.embed_positions", "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm", "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1", "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2", "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv", "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm", "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm", "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense", "ffn1.w_2": "encoder.layers.*.ffn1.output_dense", "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm", "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense", "ffn2.w_2": "encoder.layers.*.ffn2.output_dense", "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } SCREAMING_SNAKE_CASE__ : Any = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' for attribute in key.split('.' ): SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) if weight_type is not None: SCREAMING_SNAKE_CASE_ = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape else: SCREAMING_SNAKE_CASE_ = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' F' {value.shape} for {full_name}' ) if weight_type == "weight": SCREAMING_SNAKE_CASE_ = value elif weight_type == "weight_g": SCREAMING_SNAKE_CASE_ = value elif weight_type == "weight_v": SCREAMING_SNAKE_CASE_ = value elif weight_type == "bias": SCREAMING_SNAKE_CASE_ = value elif weight_type == "running_mean": SCREAMING_SNAKE_CASE_ = value elif weight_type == "running_var": SCREAMING_SNAKE_CASE_ = value elif weight_type == "num_batches_tracked": SCREAMING_SNAKE_CASE_ = value elif weight_type == "inv_freq": SCREAMING_SNAKE_CASE_ = value else: SCREAMING_SNAKE_CASE_ = value logger.info(F'{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE_ = [] SCREAMING_SNAKE_CASE_ = fairseq_model.state_dict() SCREAMING_SNAKE_CASE_ = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): SCREAMING_SNAKE_CASE_ = False if "conv_layers" in name: load_conv_layer( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) SCREAMING_SNAKE_CASE_ = True else: for key, mapped_key in MAPPING.items(): SCREAMING_SNAKE_CASE_ = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: SCREAMING_SNAKE_CASE_ = True if "*" in mapped_key: SCREAMING_SNAKE_CASE_ = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2] SCREAMING_SNAKE_CASE_ = mapped_key.replace('*' , SCREAMING_SNAKE_CASE ) if "pos_bias_u" in name: SCREAMING_SNAKE_CASE_ = None elif "pos_bias_v" in name: SCREAMING_SNAKE_CASE_ = None elif "weight_g" in name: SCREAMING_SNAKE_CASE_ = "weight_g" elif "weight_v" in name: SCREAMING_SNAKE_CASE_ = "weight_v" elif "bias" in name: SCREAMING_SNAKE_CASE_ = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj SCREAMING_SNAKE_CASE_ = "weight" elif "running_mean" in name: SCREAMING_SNAKE_CASE_ = "running_mean" elif "inv_freq" in name: SCREAMING_SNAKE_CASE_ = "inv_freq" elif "running_var" in name: SCREAMING_SNAKE_CASE_ = "running_var" elif "num_batches_tracked" in name: SCREAMING_SNAKE_CASE_ = "num_batches_tracked" else: SCREAMING_SNAKE_CASE_ = None set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(SCREAMING_SNAKE_CASE ) logger.warning(F'Unused weights: {unused_weights}' ) def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' SCREAMING_SNAKE_CASE_ = full_name.split('conv_layers.' )[-1] SCREAMING_SNAKE_CASE_ = name.split('.' ) SCREAMING_SNAKE_CASE_ = int(items[0] ) SCREAMING_SNAKE_CASE_ = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' 
) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'{full_name} has size {value.shape}, but' F' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' ) SCREAMING_SNAKE_CASE_ = value logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) else: unused_weights.append(SCREAMING_SNAKE_CASE ) @torch.no_grad() def lowercase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True ) -> Optional[int]: '''simple docstring''' if config_path is not None: SCREAMING_SNAKE_CASE_ = WavaVecaConformerConfig.from_pretrained(SCREAMING_SNAKE_CASE , hidden_act='swish' ) else: SCREAMING_SNAKE_CASE_ = WavaVecaConformerConfig() if "rope" in checkpoint_path: SCREAMING_SNAKE_CASE_ = "rotary" if is_finetuned: if dict_path: SCREAMING_SNAKE_CASE_ = Dictionary.load(SCREAMING_SNAKE_CASE ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq SCREAMING_SNAKE_CASE_ = target_dict.pad_index SCREAMING_SNAKE_CASE_ = target_dict.bos_index SCREAMING_SNAKE_CASE_ = target_dict.eos_index SCREAMING_SNAKE_CASE_ = len(target_dict.symbols ) SCREAMING_SNAKE_CASE_ = os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) if not os.path.isdir(SCREAMING_SNAKE_CASE ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(SCREAMING_SNAKE_CASE ) ) return os.makedirs(SCREAMING_SNAKE_CASE , exist_ok=SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = target_dict.indices # fairseq has the <pad> and <s> switched SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = 1 with open(SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer( SCREAMING_SNAKE_CASE , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = True if config.feat_extract_norm == "layer" else False SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , ) SCREAMING_SNAKE_CASE_ = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE ) processor.save_pretrained(SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = WavaVecaConformerForCTC(SCREAMING_SNAKE_CASE ) else: SCREAMING_SNAKE_CASE_ = WavaVecaConformerForPreTraining(SCREAMING_SNAKE_CASE ) if is_finetuned: SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: SCREAMING_SNAKE_CASE_ = argparse.Namespace(task='audio_pretraining' ) SCREAMING_SNAKE_CASE_ = fairseq.tasks.setup_task(SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=SCREAMING_SNAKE_CASE ) SCREAMING_SNAKE_CASE_ = model[0].eval() recursively_load_weights(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , not is_finetuned ) hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : List[str] = 
argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
205
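The set_recursively helper in the conversion script above walks a dotted fairseq key down the HF module tree with repeated getattr calls. A dependency-free restatement of that traversal (torch module trees resolve numeric children like "layers.0" the same way):

from types import SimpleNamespace

def get_by_dotted_key(root, dotted_key):
    obj = root
    for attr in dotted_key.split("."):
        obj = getattr(obj, attr)  # one hop per dotted segment
    return obj

tree = SimpleNamespace(encoder=SimpleNamespace(layer_norm=SimpleNamespace(weight=1.0)))
assert get_by_dotted_key(tree, "encoder.layer_norm.weight") == 1.0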
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
77
0
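The tokenizer record above is byte-level: every UTF-8 byte becomes one id, shifted past the special tokens. A minimal round trip under that assumption (three specials, pad/eos/unk, as defined in the class above):

NUM_SPECIAL = 3  # assumed layout: <pad>=0, </s>=1, <unk>=2

def text_to_ids(text):
    return [b + NUM_SPECIAL for b in text.encode("utf-8")]

def ids_to_text(ids):
    return bytes(i - NUM_SPECIAL for i in ids).decode("utf-8", errors="ignore")

assert ids_to_text(text_to_ids("héllo")) == "héllo"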
import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> List[str]: '''simple docstring''' lowerCamelCase__ = TapasConfig.from_json_file(__snake_case ) # set absolute/relative position embeddings parameter lowerCamelCase__ = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowerCamelCase__ = TapasForQuestionAnswering(config=__snake_case ) elif task == "WTQ": # run_task_main.py hparams lowerCamelCase__ = 4 lowerCamelCase__ = True # hparam_utils.py hparams lowerCamelCase__ = 0.6_6_4_6_9_4 lowerCamelCase__ = 0.2_0_7_9_5_1 lowerCamelCase__ = 0.1_2_1_1_9_4 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = False lowerCamelCase__ = 0.0_3_5_2_5_1_3 lowerCamelCase__ = TapasForQuestionAnswering(config=__snake_case ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowerCamelCase__ = 4 lowerCamelCase__ = False # hparam_utils.py hparams lowerCamelCase__ = 3_6.4_5_1_9 lowerCamelCase__ = 0.9_0_3_4_2_1 lowerCamelCase__ = 2_2_2.0_8_8 lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = True lowerCamelCase__ = 0.7_6_3_1_4_1 lowerCamelCase__ = TapasForQuestionAnswering(config=__snake_case ) elif task == "TABFACT": lowerCamelCase__ = TapasForSequenceClassification(config=__snake_case ) elif task == "MLM": lowerCamelCase__ = TapasForMaskedLM(config=__snake_case ) elif task == "INTERMEDIATE_PRETRAINING": lowerCamelCase__ = TapasModel(config=__snake_case ) else: raise ValueError(F'Task {task} not supported.' ) print(F'Building PyTorch model from configuration: {config}' ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__snake_case ,__snake_case ,__snake_case ) # Save pytorch-model (weights and configuration) print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(__snake_case ) # Save tokenizer files print(F'Save tokenizer files to {pytorch_dump_path}' ) lowerCamelCase__ = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + '''vocab.txt''' ,model_max_length=512 ) tokenizer.save_pretrained(__snake_case ) print('''Used relative position embeddings:''' ,model.config.reset_position_index_per_cell ) if __name__ == "__main__": _a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA." ) parser.add_argument( "--reset_position_index_per_cell", default=False, action="store_true", help="Whether to use relative position embeddings or not. Defaults to True.", ) parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--tapas_config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained TAPAS model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) _a = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
481
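The task dispatch in the converter above is a chain of elifs; the head selection alone reads more declaratively as a mapping (a sketch using the class names from the imports above; the per-task hyperparameters for WTQ and WIKISQL still need their own branches):

TASK_TO_HEAD = {
    "SQA": "TapasForQuestionAnswering",
    "WTQ": "TapasForQuestionAnswering",
    "WIKISQL_SUPERVISED": "TapasForQuestionAnswering",
    "TABFACT": "TapasForSequenceClassification",
    "MLM": "TapasForMaskedLM",
    "INTERMEDIATE_PRETRAINING": "TapasModel",
}

def head_for(task):
    try:
        return TASK_TO_HEAD[task]
    except KeyError:
        raise ValueError(f"Task {task} not supported.") from None

print(head_for("WTQ"))  # TapasForQuestionAnswering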
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = 
ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): 
__UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor __UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
0
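The jitted/non-jitted consistency check in the Flax test above boils down to this pattern (standalone JAX, assumed installed):

import jax
import jax.numpy as jnp

@jax.jit
def scaled_sum(x):
    return (2.0 * x).sum()

x = jnp.ones((4, 4))
with jax.disable_jit():
    eager = scaled_sum(x)   # runs op by op, no compilation
jitted = scaled_sum(x)      # traced and compiled on first call
assert jnp.allclose(eager, jitted)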
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE : int = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : List[Any] = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class UpperCamelCase ( __a ): a__ :Dict = '''yolos''' def __init__(self , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3_072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.0 , __UpperCamelCase=0.0 , __UpperCamelCase=0.02 , __UpperCamelCase=1E-1_2 , __UpperCamelCase=[512, 864] , __UpperCamelCase=16 , __UpperCamelCase=3 , __UpperCamelCase=True , __UpperCamelCase=100 , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase=1 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=5 , __UpperCamelCase=2 , __UpperCamelCase=0.1 , **__UpperCamelCase , ) -> int: super().__init__(**UpperCamelCase_ ) UpperCamelCase_ : Tuple = hidden_size UpperCamelCase_ : List[str] = num_hidden_layers UpperCamelCase_ : Tuple = num_attention_heads UpperCamelCase_ : int = intermediate_size UpperCamelCase_ : int = hidden_act UpperCamelCase_ : int = hidden_dropout_prob UpperCamelCase_ : Dict = attention_probs_dropout_prob UpperCamelCase_ : List[Any] = initializer_range UpperCamelCase_ : int = layer_norm_eps UpperCamelCase_ : List[str] = image_size UpperCamelCase_ : Tuple = patch_size UpperCamelCase_ : int = num_channels UpperCamelCase_ : Optional[Any] = qkv_bias UpperCamelCase_ : List[Any] = num_detection_tokens UpperCamelCase_ : Union[str, Any] = use_mid_position_embeddings UpperCamelCase_ : List[str] = auxiliary_loss # Hungarian matcher UpperCamelCase_ : Optional[Any] = class_cost UpperCamelCase_ : Union[str, Any] = bbox_cost UpperCamelCase_ : int = giou_cost # Loss coefficients UpperCamelCase_ : Optional[int] = bbox_loss_coefficient UpperCamelCase_ : Union[str, Any] = giou_loss_coefficient UpperCamelCase_ : Any = eos_coefficient class UpperCamelCase ( __a ): a__ :str = version.parse('''1.11''' ) @property def A_ (self ) -> Any: return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def A_ (self ) -> List[Any]: return 1E-4 @property def A_ (self ) -> Optional[Any]: return 12
635
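For orientation, the defaults in the YOLOS config above imply the following token count per image; a back-of-envelope sketch, not HF code, assuming patches plus one [CLS]-style token plus the detection tokens:

def yolos_seq_len(image_size=(512, 864), patch_size=16, num_detection_tokens=100):
    h, w = image_size
    num_patches = (h // patch_size) * (w // patch_size)  # 32 * 54 = 1728
    return num_patches + 1 + num_detection_tokens        # +1 for the CLS token

print(yolos_seq_len())  # 1829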
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
0
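The metric record above is a thin wrapper around scipy.stats.spearmanr; the direct call it forwards to looks like this:

from scipy.stats import spearmanr

rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(rho, 2))  # -0.7, matching the docstring example above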
import argparse from collections import OrderedDict from pathlib import Path import torch from transformers import ( VisualBertConfig, VisualBertForMultipleChoice, VisualBertForPreTraining, VisualBertForQuestionAnswering, VisualBertForVisualReasoning, ) from transformers.utils import logging logging.set_verbosity_info() snake_case__ : List[Any] = logging.get_logger(__name__) snake_case__ : List[str] = [ ('bert.bert', 'visual_bert'), ('bert.cls', 'cls'), ('bert.classifier', 'cls'), ('token_type_embeddings_visual', 'visual_token_type_embeddings'), ('position_embeddings_visual', 'visual_position_embeddings'), ('projection', 'visual_projection'), ] snake_case__ : Any = [ 'nlvr2_coco_pre_trained.th', 'nlvr2_fine_tuned.th', 'nlvr2_pre_trained.th', 'vcr_coco_pre_train.th', 'vcr_fine_tune.th', 'vcr_pre_train.th', 'vqa_coco_pre_trained.th', 'vqa_fine_tuned.th', 'vqa_pre_trained.th', ] def lowerCamelCase__ ( _lowerCamelCase ) ->List[Any]: _UpperCAmelCase =torch.load(_lowerCamelCase , map_location="cpu" ) return sd def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=rename_keys_prefix ) ->List[str]: _UpperCAmelCase =OrderedDict() _UpperCAmelCase =torch.arange(config.max_position_embeddings ).expand((1, -1) ) # detector_d = OrderedDict() for key in d: if "detector" in key: # detector_d[key.replace('detector.','')] = d[key] continue _UpperCAmelCase =key for name_pair in rename_keys_prefix: _UpperCAmelCase =new_key.replace(name_pair[0] , name_pair[1] ) _UpperCAmelCase =d[key] if key == "bert.cls.predictions.decoder.weight": # Old bert code didn't have `decoder.bias`, but was added separately _UpperCAmelCase =new_d["cls.predictions.bias"] return new_d @torch.no_grad() def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ) ->Union[str, Any]: assert ( checkpoint_path.split("/" )[-1] in ACCEPTABLE_CHECKPOINTS ), F"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}." # Get Config if "pre" in checkpoint_path: _UpperCAmelCase ="pretraining" if "vcr" in checkpoint_path: _UpperCAmelCase ={"visual_embedding_dim": 512} elif "vqa_advanced" in checkpoint_path: _UpperCAmelCase ={"visual_embedding_dim": 2048} elif "vqa" in checkpoint_path: _UpperCAmelCase ={"visual_embedding_dim": 2048} elif "nlvr" in checkpoint_path: _UpperCAmelCase ={"visual_embedding_dim": 1024} else: raise NotImplementedError(F"No implementation found for `{checkpoint_path}`." 
) else: if "vcr" in checkpoint_path: _UpperCAmelCase ={"visual_embedding_dim": 512} _UpperCAmelCase ="multichoice" elif "vqa_advanced" in checkpoint_path: _UpperCAmelCase ={"visual_embedding_dim": 2048} _UpperCAmelCase ="vqa_advanced" elif "vqa" in checkpoint_path: _UpperCAmelCase ={"visual_embedding_dim": 2048, "num_labels": 3129} _UpperCAmelCase ="vqa" elif "nlvr" in checkpoint_path: _UpperCAmelCase ={ "visual_embedding_dim": 1024, "num_labels": 2, } _UpperCAmelCase ="nlvr" _UpperCAmelCase =VisualBertConfig(**_lowerCamelCase ) # Load State Dict _UpperCAmelCase =load_state_dict(_lowerCamelCase ) _UpperCAmelCase =get_new_dict(_lowerCamelCase , _lowerCamelCase ) if model_type == "pretraining": _UpperCAmelCase =VisualBertForPreTraining(_lowerCamelCase ) elif model_type == "vqa": _UpperCAmelCase =VisualBertForQuestionAnswering(_lowerCamelCase ) elif model_type == "nlvr": _UpperCAmelCase =VisualBertForVisualReasoning(_lowerCamelCase ) elif model_type == "multichoice": _UpperCAmelCase =VisualBertForMultipleChoice(_lowerCamelCase ) model.load_state_dict(_lowerCamelCase ) # Save Checkpoints Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase ) model.save_pretrained(_lowerCamelCase ) if __name__ == "__main__": snake_case__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.') parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.') snake_case__ : List[str] = parser.parse_args() convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
408
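The renaming pass in get_new_dict above applies ordered (old, new) prefix substitutions to every state-dict key; a standalone restatement of that loop:

from collections import OrderedDict

def rename_state_dict(sd, rename_pairs):
    out = OrderedDict()
    for key, value in sd.items():
        new_key = key
        for old, new in rename_pairs:
            new_key = new_key.replace(old, new)  # pair order matters on overlapping prefixes
        out[new_key] = value
    return out

sd = {"bert.bert.encoder.w": 1, "bert.cls.bias": 2}
print(rename_state_dict(sd, [("bert.bert", "visual_bert"), ("bert.cls", "cls")]))
# OrderedDict([('visual_bert.encoder.w', 1), ('cls.bias', 2)])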
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should 
be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
0
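The __getstate__/__setstate__ pair in the tokenizer above exists because the SentencePiece processor cannot be pickled; the generic shape of that pattern, with threading.Lock standing in for the unpicklable member:

import pickle
import threading

class HasUnpicklable:
    def __init__(self):
        self.lock = threading.Lock()  # stand-in for the sp processor

    def __getstate__(self):
        state = self.__dict__.copy()
        state["lock"] = None          # drop the unpicklable member
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        self.lock = threading.Lock()  # rebuild it after unpickling

obj = pickle.loads(pickle.dumps(HasUnpicklable()))
assert obj.lock is not None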
def snake_case (UpperCAmelCase__ ) -> str: UpperCamelCase_: List[str] = "" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def snake_case (UpperCAmelCase__ ) -> dict[str, str]: UpperCamelCase_: int = [chr(i + 6_5 ) for i in range(2_6 )] # Remove duplicate characters from key UpperCamelCase_: List[Any] = remove_duplicates(key.upper() ) UpperCamelCase_: Dict = len(UpperCAmelCase__ ) # First fill cipher with key characters UpperCamelCase_: int = {alphabet[i]: char for i, char in enumerate(UpperCAmelCase__ )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(UpperCAmelCase__ ) , 2_6 ): UpperCamelCase_: Optional[Any] = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 UpperCamelCase_: List[Any] = alphabet[i - offset] UpperCamelCase_: Optional[Any] = char return cipher_alphabet def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> str: return "".join(cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def snake_case (UpperCAmelCase__ , UpperCAmelCase__ ) -> str: UpperCamelCase_: str = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(UpperCAmelCase__ , UpperCAmelCase__ ) for ch in message.upper() ) def snake_case () -> None: UpperCamelCase_: Any = input('Enter message to encode or decode: ' ).strip() UpperCamelCase_: str = input('Enter keyword: ' ).strip() UpperCamelCase_: Optional[Any] = input('Encipher or decipher? E/D:' ).strip()[0].lower() try: UpperCamelCase_: List[str] = {"e": encipher, "d": decipher}[option] except KeyError: raise KeyError('invalid input option' ) UpperCamelCase_: Dict = create_cipher_map(UpperCAmelCase__ ) print(func(UpperCAmelCase__ , UpperCAmelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
57
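The script above builds a keyword substitution alphabet, filling the remaining letters with an offset-based rule; for comparison, the classic keyword-cipher construction (not necessarily the identical mapping) simply appends the unused letters in order:

def keyword_alphabet(key):
    seen = []
    for ch in key.upper() + "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
        if ch.isalpha() and ch not in seen:
            seen.append(ch)
    return "".join(seen)

print(keyword_alphabet("zebras"))  # ZEBRASCDFGHIJKLMNOPQTUVWXY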
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , 
UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def 
_UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
77
0
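What gather_for_metrics in the test above guards against, in miniature: distributed samplers pad the last batch by repeating samples, and the gathered predictions must be trimmed back to the true dataset length before computing metrics (sample ordering simplified here for illustration):

world_size, num_samples = 2, 5
per_rank = [[0, 1, 2], [3, 4, 4]]                  # rank 1 padded by repeating sample 4
assert len(per_rank) == world_size
gathered = [x for rank in per_rank for x in rank]  # naive all-gather yields 6 items
trimmed = gathered[:num_samples]                   # what gather_for_metrics hands back
assert trimmed == [0, 1, 2, 3, 4]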
"""Approximate the area between a curve and the x-axis with the trapezoid rule."""
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    """Treat each small segment of the curve as linear and sum the trapezoid areas."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0
    for _ in range(steps):
        # Advance one step along x and evaluate the function there.
        xa_next = (x_end - x_start) / steps + xa
        fxa_next = fnc(xa_next)
        # One trapezoid: average of the two heights times the width.
        area += abs(fxa_next + fxa) * (xa_next - xa) / 2
        xa = xa_next
        fxa = fxa_next
    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
267
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list: """simple docstring""" __UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase ) for i in range(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = i __UpperCAmelCase : Any = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __UpperCAmelCase : Dict = array[temp_index - 1] temp_index -= 1 __UpperCAmelCase : str = temp_index_value return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap """simple docstring""" __UpperCAmelCase : Optional[Any] = index __UpperCAmelCase : List[str] = 2 * index + 1 # Left Node __UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __UpperCAmelCase : Tuple = left_index if right_index < heap_size and array[largest] < array[right_index]: __UpperCAmelCase : int = right_index if largest != index: __UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index] heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i] heapify(UpperCamelCase , 0 , UpperCamelCase ) return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = low __UpperCAmelCase : List[str] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i] i += 1 def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = 16 return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(UpperCamelCase ) max_depth -= 1 __UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = p return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by a comma : """).strip() A = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
77
0
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=a_ ) class _UpperCAmelCase ( a_ ): """simple docstring""" # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization __snake_case = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __snake_case = Features({"""text""": Value("""string""" )} ) __snake_case = Features({"""labels""": ClassLabel} ) __snake_case = """text""" __snake_case = """labels""" def a__ ( self , _lowercase ) -> List[str]: if self.label_column not in features: raise ValueError(F'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , UpperCamelCase_ ): raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' ) _lowerCamelCase : int = copy.deepcopy(self ) _lowerCamelCase : Optional[int] = self.label_schema.copy() _lowerCamelCase : Any = features[self.label_column] _lowerCamelCase : List[str] = label_schema return task_template @property def a__ ( self ) -> List[str]: return { self.text_column: "text", self.label_column: "labels", }
434
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
0
"""simple docstring""" import argparse import os import re _A = """src/transformers""" # Pattern that looks at the indentation in a line. _A = re.compile(R"""^(\s*)\S""") # Pattern that matches `"key":" and puts `key` in group 0. _A = re.compile(R"""^\s*\"([^\"]+)\":""") # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. _A = re.compile(R"""^\s*_import_structure\[\"([^\"]+)\"\]""") # Pattern that matches `"key",` and puts `key` in group 0. _A = re.compile(R"""^\s*\"([^\"]+)\",\s*$""") # Pattern that matches any `[stuff]` and puts `stuff` in group 0. _A = re.compile(R"""\[([^\]]+)\]""") def a__ ( lowerCAmelCase ) -> Optional[Any]: UpperCAmelCase__ : Any = _re_indent.search(lowerCAmelCase ) return "" if search is None else search.groups()[0] def a__ ( lowerCAmelCase , lowerCAmelCase="" , lowerCAmelCase=None , lowerCAmelCase=None ) -> Any: UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : Union[str, Any] = code.split("""\n""" ) if start_prompt is not None: while not lines[index].startswith(lowerCAmelCase ): index += 1 UpperCAmelCase__ : Optional[Any] = ["\n".join(lines[:index] )] else: UpperCAmelCase__ : Dict = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). UpperCAmelCase__ : Any = [lines[index]] index += 1 while index < len(lowerCAmelCase ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase )): if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level: if len(lowerCAmelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ): current_block.append(lines[index] ) blocks.append("""\n""".join(lowerCAmelCase ) ) if index < len(lowerCAmelCase ) - 1: UpperCAmelCase__ : Optional[Any] = [lines[index + 1]] index += 1 else: UpperCAmelCase__ : Optional[Any] = [] else: blocks.append("""\n""".join(lowerCAmelCase ) ) UpperCAmelCase__ : str = [lines[index]] else: current_block.append(lines[index] ) index += 1 # Adds current block if it's nonempty. if len(lowerCAmelCase ) > 0: blocks.append("""\n""".join(lowerCAmelCase ) ) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(lowerCAmelCase ): blocks.append("""\n""".join(lines[index:] ) ) return blocks def a__ ( lowerCAmelCase ) -> Optional[int]: def _inner(lowerCAmelCase ): return key(lowerCAmelCase ).lower().replace("""_""" , """""" ) return _inner def a__ ( lowerCAmelCase , lowerCAmelCase=None ) -> Optional[int]: # If no key is provided, we use a noop. def noop(lowerCAmelCase ): return x if key is None: UpperCAmelCase__ : Optional[Any] = noop # Constants are all uppercase, they go first. UpperCAmelCase__ : Optional[int] = [obj for obj in objects if key(lowerCAmelCase ).isupper()] # Classes are not all uppercase but start with a capital, they go second. UpperCAmelCase__ : Tuple = [obj for obj in objects if key(lowerCAmelCase )[0].isupper() and not key(lowerCAmelCase ).isupper()] # Functions begin with a lowercase, they go last. UpperCAmelCase__ : int = [obj for obj in objects if not key(lowerCAmelCase )[0].isupper()] UpperCAmelCase__ : Dict = ignore_underscore(lowerCAmelCase ) return sorted(lowerCAmelCase , key=lowerCAmelCase ) + sorted(lowerCAmelCase , key=lowerCAmelCase ) + sorted(lowerCAmelCase , key=lowerCAmelCase ) def a__ ( lowerCAmelCase ) -> List[Any]: # This inner function sort imports between [ ]. 
def _replace(lowerCAmelCase ): UpperCAmelCase__ : Dict = match.groups()[0] if "," not in imports: return F"""[{imports}]""" UpperCAmelCase__ : Optional[int] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCAmelCase__ : Dict = keys[:-1] return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCAmelCase )] ) + "]" UpperCAmelCase__ : Optional[int] = import_statement.split("""\n""" ) if len(lowerCAmelCase ) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. UpperCAmelCase__ : Any = 2 if lines[1].strip() == "[" else 1 UpperCAmelCase__ : Union[str, Any] = [(i, _re_strip_line.search(lowerCAmelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )] UpperCAmelCase__ : Dict = sort_objects(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] ) UpperCAmelCase__ : List[str] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] ) elif len(lowerCAmelCase ) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1] ) is not None: UpperCAmelCase__ : Dict = _re_bracket_content.sub(_replace , lines[1] ) else: UpperCAmelCase__ : str = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )] # We will have a final empty element if the line finished with a comma. if len(keys[-1] ) == 0: UpperCAmelCase__ : str = keys[:-1] UpperCAmelCase__ : Optional[Any] = get_indent(lines[1] ) + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowerCAmelCase )] ) return "\n".join(lowerCAmelCase ) else: # Finally we have to deal with imports fitting on one line UpperCAmelCase__ : Optional[Any] = _re_bracket_content.sub(_replace , lowerCAmelCase ) return import_statement def a__ ( lowerCAmelCase , lowerCAmelCase=True ) -> Dict: with open(lowerCAmelCase , encoding="""utf-8""" ) as f: UpperCAmelCase__ : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 UpperCAmelCase__ : Optional[Any] = split_code_in_indented_blocks( lowerCAmelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" ) # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(lowerCAmelCase ) - 1 ): # Check if the block contains some `_import_structure`s thingy to sort. UpperCAmelCase__ : Dict = main_blocks[block_idx] UpperCAmelCase__ : str = block.split("""\n""" ) # Get to the start of the imports. UpperCAmelCase__ : List[Any] = 0 while line_idx < len(lowerCAmelCase ) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: UpperCAmelCase__ : Optional[int] = len(lowerCAmelCase ) else: line_idx += 1 if line_idx >= len(lowerCAmelCase ): continue # Ignore beginning and last line: they don't contain anything. UpperCAmelCase__ : Any = "\n".join(block_lines[line_idx:-1] ) UpperCAmelCase__ : Optional[Any] = get_indent(block_lines[1] ) # Slit the internal block into blocks of indent level 1. 
UpperCAmelCase__ : List[str] = split_code_in_indented_blocks(lowerCAmelCase , indent_level=lowerCAmelCase ) # We have two categories of import key: list or _import_structure[key].append/extend UpperCAmelCase__ : Union[str, Any] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. UpperCAmelCase__ : List[str] = [(pattern.search(lowerCAmelCase ).groups()[0] if pattern.search(lowerCAmelCase ) is not None else None) for b in internal_blocks] # We only sort the lines with a key. UpperCAmelCase__ : str = [(i, key) for i, key in enumerate(lowerCAmelCase ) if key is not None] UpperCAmelCase__ : Optional[int] = [x[0] for x in sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x[1] )] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. UpperCAmelCase__ : Optional[int] = 0 UpperCAmelCase__ : List[str] = [] for i in range(len(lowerCAmelCase ) ): if keys[i] is None: reorderded_blocks.append(internal_blocks[i] ) else: UpperCAmelCase__ : str = sort_objects_in_import(internal_blocks[sorted_indices[count]] ) reorderded_blocks.append(lowerCAmelCase ) count += 1 # And we put our main block back together with its first and last line. UpperCAmelCase__ : List[str] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]] ) if code != "\n".join(lowerCAmelCase ): if check_only: return True else: print(F"""Overwriting {file}.""" ) with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write("""\n""".join(lowerCAmelCase ) ) def a__ ( lowerCAmelCase=True ) -> Any: UpperCAmelCase__ : Optional[Any] = [] for root, _, files in os.walk(lowerCAmelCase ): if "__init__.py" in files: UpperCAmelCase__ : Optional[Any] = sort_imports(os.path.join(lowerCAmelCase , """__init__.py""" ) , check_only=lowerCAmelCase ) if result: UpperCAmelCase__ : Tuple = [os.path.join(lowerCAmelCase , """__init__.py""" )] if len(lowerCAmelCase ) > 0: raise ValueError(F"""Would overwrite {len(lowerCAmelCase )} files, run `make style`.""" ) if __name__ == "__main__": _A = argparse.ArgumentParser() parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""") _A = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
182
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
0
from typing import TYPE_CHECKING

from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool


if TYPE_CHECKING:
    from PIL import Image


class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
491
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __UpperCAmelCase : Optional[Any] = TapasConfig.from_json_file(UpperCamelCase ) # set absolute/relative position embeddings parameter __UpperCAmelCase : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WTQ": # run_task_main.py hparams __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : Any = True # hparam_utils.py hparams __UpperCAmelCase : Union[str, Any] = 0.664694 __UpperCAmelCase : Union[str, Any] = 0.207951 __UpperCAmelCase : int = 0.121194 __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[str] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[str] = 0.0352513 __UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __UpperCAmelCase : int = 4 __UpperCAmelCase : Optional[int] = False # hparam_utils.py hparams __UpperCAmelCase : int = 36.4519 __UpperCAmelCase : str = 0.903421 __UpperCAmelCase : Dict = 222.088 __UpperCAmelCase : Dict = True __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Any = 0.763141 __UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "TABFACT": __UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase ) elif task == "MLM": __UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": __UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase ) else: raise ValueError(f"Task {task} not supported." ) print(f"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(UpperCamelCase ) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}" ) __UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(UpperCamelCase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. 
Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
77
0
def partition(m: int) -> int:
    '''simple docstring'''
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
79
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None: """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __UpperCAmelCase : Optional[Any] = v.half() if save_path is None: # overwrite src_path __UpperCAmelCase : str = src_path torch.save(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
77
0
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class _UpperCAmelCase ( lowercase ): lowerCamelCase_ : Any = 4_2 class _UpperCAmelCase ( lowercase , lowercase ): @register_to_config def __init__( self : Optional[int] , UpperCAmelCase : int = 32 , UpperCAmelCase : int = 64 , UpperCAmelCase : int = 20 , UpperCAmelCase : int = 7_68 , UpperCAmelCase : List[Any]=77 , UpperCAmelCase : Dict=4 , UpperCAmelCase : float = 0.0 , UpperCAmelCase : str = "silu" , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "linear" , UpperCAmelCase : Optional[str] = "prd" , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[int] = None , ): super().__init__() SCREAMING_SNAKE_CASE_ :int = num_attention_heads SCREAMING_SNAKE_CASE_ :int = attention_head_dim SCREAMING_SNAKE_CASE_ :Tuple = num_attention_heads * attention_head_dim SCREAMING_SNAKE_CASE_ :List[str] = additional_embeddings SCREAMING_SNAKE_CASE_ :int = time_embed_dim or inner_dim SCREAMING_SNAKE_CASE_ :List[str] = embedding_proj_dim or embedding_dim SCREAMING_SNAKE_CASE_ :Optional[Any] = clip_embed_dim or embedding_dim SCREAMING_SNAKE_CASE_ :Tuple = Timesteps(UpperCamelCase_ , UpperCamelCase_ , 0) SCREAMING_SNAKE_CASE_ :List[str] = TimestepEmbedding(UpperCamelCase_ , UpperCamelCase_ , out_dim=UpperCamelCase_ , act_fn=UpperCamelCase_) SCREAMING_SNAKE_CASE_ :int = nn.Linear(UpperCamelCase_ , UpperCamelCase_) if embedding_proj_norm_type is None: SCREAMING_SNAKE_CASE_ :Union[str, Any] = None elif embedding_proj_norm_type == "layer": SCREAMING_SNAKE_CASE_ :str = nn.LayerNorm(UpperCamelCase_) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}") SCREAMING_SNAKE_CASE_ :List[str] = nn.Linear(UpperCamelCase_ , UpperCamelCase_) if encoder_hid_proj_type is None: SCREAMING_SNAKE_CASE_ :List[str] = None elif encoder_hid_proj_type == "linear": SCREAMING_SNAKE_CASE_ :List[str] = nn.Linear(UpperCamelCase_ , UpperCamelCase_) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}") SCREAMING_SNAKE_CASE_ :List[Any] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase_)) if added_emb_type == "prd": SCREAMING_SNAKE_CASE_ :Union[str, Any] = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase_)) elif added_emb_type is None: SCREAMING_SNAKE_CASE_ :int = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. 
Make sure to choose one of `'prd'` or `None`.") SCREAMING_SNAKE_CASE_ :Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , dropout=UpperCamelCase_ , activation_fn="gelu" , attention_bias=UpperCamelCase_ , ) for d in range(UpperCamelCase_) ]) if norm_in_type == "layer": SCREAMING_SNAKE_CASE_ :List[Any] = nn.LayerNorm(UpperCamelCase_) elif norm_in_type is None: SCREAMING_SNAKE_CASE_ :Any = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}.") SCREAMING_SNAKE_CASE_ :List[str] = nn.LayerNorm(UpperCamelCase_) SCREAMING_SNAKE_CASE_ :Any = nn.Linear(UpperCamelCase_ , UpperCamelCase_) SCREAMING_SNAKE_CASE_ :Any = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0) causal_attention_mask.triu_(1) SCREAMING_SNAKE_CASE_ :str = causal_attention_mask[None, ...] self.register_buffer("causal_attention_mask" , UpperCamelCase_ , persistent=UpperCamelCase_) SCREAMING_SNAKE_CASE_ :Tuple = nn.Parameter(torch.zeros(1 , UpperCamelCase_)) SCREAMING_SNAKE_CASE_ :Any = nn.Parameter(torch.zeros(1 , UpperCamelCase_)) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _snake_case ( self : Optional[Any]): SCREAMING_SNAKE_CASE_ :Dict = {} def fn_recursive_add_processors(UpperCAmelCase : str , UpperCAmelCase : torch.nn.Module , UpperCAmelCase : Dict[str, AttentionProcessor]): if hasattr(UpperCamelCase_ , "set_processor"): SCREAMING_SNAKE_CASE_ :int = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , UpperCamelCase_ , UpperCamelCase_) return processors for name, module in self.named_children(): fn_recursive_add_processors(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) return processors def _snake_case ( self : str , UpperCAmelCase : Union[AttentionProcessor, Dict[str, AttentionProcessor]]): SCREAMING_SNAKE_CASE_ :int = len(self.attn_processors.keys()) if isinstance(UpperCamelCase_ , UpperCamelCase_) and len(UpperCamelCase_) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(UpperCamelCase_)} does not match the" F" number of attention layers: {count}. 
Please make sure to pass {count} processor classes.") def fn_recursive_attn_processor(UpperCAmelCase : str , UpperCAmelCase : torch.nn.Module , UpperCAmelCase : Any): if hasattr(UpperCamelCase_ , "set_processor"): if not isinstance(UpperCamelCase_ , UpperCamelCase_): module.set_processor(UpperCamelCase_) else: module.set_processor(processor.pop(F"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , UpperCamelCase_ , UpperCamelCase_) for name, module in self.named_children(): fn_recursive_attn_processor(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def _snake_case ( self : str): self.set_attn_processor(AttnProcessor()) def _snake_case ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Optional[torch.FloatTensor] = None , UpperCAmelCase : Optional[torch.BoolTensor] = None , UpperCAmelCase : bool = True , ): SCREAMING_SNAKE_CASE_ :int = hidden_states.shape[0] SCREAMING_SNAKE_CASE_ :Union[str, Any] = timestep if not torch.is_tensor(UpperCamelCase_): SCREAMING_SNAKE_CASE_ :Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device) elif torch.is_tensor(UpperCamelCase_) and len(timesteps.shape) == 0: SCREAMING_SNAKE_CASE_ :Union[str, Any] = timesteps[None].to(hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE_ :str = timesteps * torch.ones(UpperCamelCase_ , dtype=timesteps.dtype , device=timesteps.device) SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.time_proj(UpperCamelCase_) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
SCREAMING_SNAKE_CASE_ :Any = timesteps_projected.to(dtype=self.dtype) SCREAMING_SNAKE_CASE_ :Optional[int] = self.time_embedding(UpperCamelCase_) if self.embedding_proj_norm is not None: SCREAMING_SNAKE_CASE_ :List[str] = self.embedding_proj_norm(UpperCamelCase_) SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.embedding_proj(UpperCamelCase_) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.encoder_hidden_states_proj(UpperCamelCase_) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set") SCREAMING_SNAKE_CASE_ :List[Any] = self.proj_in(UpperCamelCase_) SCREAMING_SNAKE_CASE_ :str = self.positional_embedding.to(hidden_states.dtype) SCREAMING_SNAKE_CASE_ :int = [] SCREAMING_SNAKE_CASE_ :Union[str, Any] = 0 if encoder_hidden_states is not None: additional_embeds.append(UpperCamelCase_) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape) == 2: SCREAMING_SNAKE_CASE_ :Union[str, Any] = proj_embeddings[:, None, :] if len(hidden_states.shape) == 2: SCREAMING_SNAKE_CASE_ :List[str] = hidden_states[:, None, :] SCREAMING_SNAKE_CASE_ :List[str] = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase_ , -1 , -1) additional_embeds.append(UpperCamelCase_) SCREAMING_SNAKE_CASE_ :Optional[int] = torch.cat( UpperCamelCase_ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens SCREAMING_SNAKE_CASE_ :List[Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: SCREAMING_SNAKE_CASE_ :str = F.pad( UpperCamelCase_ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) SCREAMING_SNAKE_CASE_ :Tuple = hidden_states + positional_embeddings if attention_mask is not None: SCREAMING_SNAKE_CASE_ :Dict = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 SCREAMING_SNAKE_CASE_ :str = F.pad(UpperCamelCase_ , (0, self.additional_embeddings) , value=0.0) SCREAMING_SNAKE_CASE_ :List[str] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) SCREAMING_SNAKE_CASE_ :List[Any] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0) if self.norm_in is not None: SCREAMING_SNAKE_CASE_ :int = self.norm_in(UpperCamelCase_) for block in self.transformer_blocks: SCREAMING_SNAKE_CASE_ :int = block(UpperCamelCase_ , attention_mask=UpperCamelCase_) SCREAMING_SNAKE_CASE_ :Any = self.norm_out(UpperCamelCase_) if self.prd_embedding is not None: SCREAMING_SNAKE_CASE_ :Dict = hidden_states[:, -1] else: SCREAMING_SNAKE_CASE_ :str = hidden_states[:, additional_embeddings_len:] SCREAMING_SNAKE_CASE_ :List[str] = self.proj_to_clip_embeddings(UpperCamelCase_) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase_) def _snake_case ( self : str , UpperCAmelCase : Dict): SCREAMING_SNAKE_CASE_ :str = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
631
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
    "distilbert-base-uncased-distilled-squad": (
        "https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
    "distilbert-base-cased-distilled-squad": (
        "https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
    ),
    "distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
    "distilbert-base-multilingual-cased": (
        "https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
    ),
    "distilbert-base-uncased-finetuned-sst-2-english": (
        "https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
    ),
}


class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
205
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = 
tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = 
self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    '''simple docstring'''

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
481
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
0
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # polynomial product: duals[k] holds the coefficient of epsilon**(k + 1)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
635
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
0
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=' ')
            temp = temp.next
        print()

    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
408
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _lowerCAmelCase( unittest.TestCase ): """simple docstring""" def _a ( self ): UpperCamelCase_: int = tf.convert_to_tensor( [ [ 8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0 -0.5_6_2_0_0_4_4, 5.2_3_2_2_9_7_5_2, 4.0_3_8_6_3_9_3, -6.8_7_9_8_3_7_8, -0.5_4_7_8_5_8_0_2, -3.2_0_1_2_1_5_3, 2.9_2_7_7_7_1_7_6, 1.8_8_1_7_1_9_5_3, 7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9 8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10 -9.8_5_7_1_1_8_3_6, -5.9_6_2_0_9_2_3_6, -1.1_3_0_3_9_1_6_1, -7.1_1_1_5_2_9_4, -0.8_3_6_9_6_3_3, -5.3_1_8_6_4_0_8, 7.0_6_4_2_7_4_0_7, 0.8_1_3_6_9_3_4_4, -0.8_2_0_2_3_8_1_7, -5.9_1_7_9_7_9_6, 0.5_8_8_1_3_4_4_3, -6.9_9_7_7_8_4_3_8, 4.7_1_5_5_1_1_8_9, -0.1_8_7_7_1_6_3_7, 7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25 9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26 2.1_2_6_6_2_9_4_1, -9.3_2_5_6_2_0_3_8, 2.3_5_6_5_2_5_2_2, ], # cummulative prob of 5 highest values <= 0.6 [ 0.5_8_4_2_5_5_1_8, 4.5_3_1_3_9_2_3_8, -5.5_7_5_1_0_4_6_4, -6.2_8_0_3_0_6_9_9, -7.1_9_5_2_9_5_0_3, -4.0_2_1_2_2_5_5_1, 1.3_9_3_3_7_0_3_7, -6.0_6_7_0_7_0_5_7, 1.5_9_4_8_0_5_1_7, -9.6_4_3_1_1_9, 0.0_3_9_0_7_7_9_9, 0.6_7_2_3_1_7_6_2, -8.8_8_2_0_6_7_2_6, 6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13 2.2_8_5_2_0_7_2_3, 4.8_2_7_6_7_5_0_6, 4.3_0_4_2_1_3_6_8, 8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17 5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18 -4.4_7_3_5_7_9_4, 7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20 -2.9_1_0_5_1_6_6_3, 2.6_1_9_4_6_0_7_7, -2.5_6_7_4_7_6_2, -9.4_8_9_5_9_3_0_2, -4.0_2_9_2_2_6_4_5, -1.3_5_4_1_6_9_1_8, 9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 
27 -5.8_9_4_7_8_5_5_3, 1.8_5_3_7_0_4_6_7, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) UpperCamelCase_: Tuple = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above UpperCamelCase_: List[str] = tf.convert_to_tensor( [8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above UpperCamelCase_: Any = tf_top_k_top_p_filtering(UpperCamelCase_ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 ) UpperCamelCase_: List[Any] = output[output != -float('inf' )] UpperCamelCase_: Tuple = tf.cast( tf.where(tf.not_equal(UpperCamelCase_ , tf.constant(-float('inf' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1e-12 ) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ ) @require_tf class _lowerCAmelCase( unittest.TestCase , UpperCAmelCase_ ): """simple docstring""" if is_tf_available(): a : Any ={ '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def _a ( self ): UpperCamelCase_: int = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) UpperCamelCase_: str = 2 UpperCamelCase_: int = 2 class _lowerCAmelCase( tf.Module ): """simple docstring""" def __init__( self , _lowerCamelCase ): super(UpperCamelCase_ , self ).__init__() UpperCamelCase_: Optional[Any] = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='input_ids' ), tf.TensorSpec((None, input_length) , tf.intaa , name='attention_mask' ), ) , jit_compile=UpperCamelCase_ , ) def _a ( self , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: Optional[int] = self.model.generate( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , ) return {"sequences": outputs["sequences"]} UpperCamelCase_: str = [[2, 0], [1_0_2, 1_0_3]] UpperCamelCase_: str = [[1, 0], [1, 1]] UpperCamelCase_: str = DummyModel(model=UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'serving_default': dummy_model.serving} ) UpperCamelCase_: List[str] = tf.saved_model.load(UpperCamelCase_ ).signatures["serving_default"] for batch_size in range(1 , len(UpperCamelCase_ ) + 1 ): UpperCamelCase_: Optional[int] = { "input_ids": tf.constant(dummy_input_ids[:batch_size] ), "attention_mask": tf.constant(dummy_attention_masks[:batch_size] ), } UpperCamelCase_: str = serving_func(**UpperCamelCase_ )["sequences"] UpperCamelCase_: str = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ ) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ ) @slow def _a ( self ): UpperCamelCase_: Optional[int] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) UpperCamelCase_: List[str] = 1 UpperCamelCase_: List[Any] = 2 class 
_lowerCAmelCase( tf.Module ): """simple docstring""" def __init__( self , _lowerCamelCase ): super(UpperCamelCase_ , self ).__init__() UpperCamelCase_: int = model @tf.function( input_signature=( tf.TensorSpec((batch_size, None) , tf.intaa , name='input_ids' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='attention_mask' ), ) , jit_compile=UpperCamelCase_ , ) def _a ( self , _lowerCamelCase , _lowerCamelCase ): UpperCamelCase_: Tuple = self.model.generate( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , max_new_tokens=UpperCamelCase_ , return_dict_in_generate=UpperCamelCase_ , ) return {"sequences": outputs["sequences"]} UpperCamelCase_: Union[str, Any] = [[2], [1_0_2, 1_0_3]] UpperCamelCase_: str = [[1], [1, 1]] UpperCamelCase_: Optional[int] = DummyModel(model=UpperCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(UpperCamelCase_ , UpperCamelCase_ , signatures={'serving_default': dummy_model.serving} ) UpperCamelCase_: Tuple = tf.saved_model.load(UpperCamelCase_ ).signatures["serving_default"] for input_row in range(len(UpperCamelCase_ ) ): UpperCamelCase_: Dict = { "input_ids": tf.constant([dummy_input_ids[input_row]] ), "attention_mask": tf.constant([dummy_attention_masks[input_row]] ), } UpperCamelCase_: Dict = serving_func(**UpperCamelCase_ )["sequences"] UpperCamelCase_: int = test_model.generate(**UpperCamelCase_ , max_new_tokens=UpperCamelCase_ ) tf.debugging.assert_equal(UpperCamelCase_ , UpperCamelCase_ ) @slow @require_tensorflow_text def _a ( self ): with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='google/flan-t5-small' , filename='spiece.model' , local_dir=UpperCamelCase_ ) class _lowerCAmelCase( tf.keras.layers.Layer ): """simple docstring""" def __init__( self ): super().__init__() UpperCamelCase_: Dict = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(UpperCamelCase_ , 'spiece.model' ) , 'rb' ).read() ) UpperCamelCase_: List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('hf-internal-testing/tiny-random-t5' ) def _a ( self , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ): UpperCamelCase_: Optional[int] = self.tokenizer.tokenize(UpperCamelCase_ ) UpperCamelCase_: List[str] = text.pad_model_inputs( UpperCamelCase_ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id ) UpperCamelCase_: int = self.model.generate(input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ ) return self.tokenizer.detokenize(UpperCamelCase_ ) UpperCamelCase_: Dict = CompleteSentenceTransformer() UpperCamelCase_: List[str] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='inputs' ) UpperCamelCase_: Optional[Any] = complete_model(UpperCamelCase_ ) UpperCamelCase_: Optional[Any] = tf.keras.Model(UpperCamelCase_ , UpperCamelCase_ ) keras_model.save(UpperCamelCase_ ) def _a ( self ): UpperCamelCase_: Tuple = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 1_0, "temperature": 0.7, } UpperCamelCase_: List[str] = 1_4 UpperCamelCase_: Optional[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) UpperCamelCase_: List[str] = "Hello, my dog is cute and" UpperCamelCase_: Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='tf' ) UpperCamelCase_: List[str] = TFAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ) UpperCamelCase_: Optional[int] = 6_3_8 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) 
UpperCamelCase_: List[Any] = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) UpperCamelCase_: Dict = [6_3_8, 1_9_8] with tf.device(':/CPU:0' ): tf.random.set_seed(0 ) UpperCamelCase_: List[Any] = model.generate(**UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) self.assertTrue(expectation == len(generated_tokens[0] ) ) def _a ( self ): UpperCamelCase_: int = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bart' ) UpperCamelCase_: Dict = "Hugging Face is a technology company based in New York and Paris." UpperCamelCase_: Tuple = bart_tokenizer(UpperCamelCase_ , return_tensors='tf' ).input_ids UpperCamelCase_: List[str] = TFBartForConditionalGeneration.from_pretrained('hf-internal-testing/tiny-random-bart' ) UpperCamelCase_: int = bart_model.generate(UpperCamelCase_ ).numpy() class _lowerCAmelCase( UpperCAmelCase_ ): """simple docstring""" def _a ( self , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase ): return super().call(UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase_: List[Any] = FakeBart.from_pretrained('hf-internal-testing/tiny-random-bart' ) UpperCamelCase_: Union[str, Any] = bart_model.generate(UpperCamelCase_ , foo='bar' ).numpy() self.assertTrue(np.array_equal(UpperCamelCase_ , UpperCamelCase_ ) ) class _lowerCAmelCase( bart_model.model.encoder.__class__ ): """simple docstring""" def _a ( self , _lowerCamelCase , **_lowerCamelCase ): return super().call(UpperCamelCase_ , **UpperCamelCase_ ) UpperCamelCase_: Dict = FakeEncoder(bart_model.config , bart_model.model.shared ) UpperCamelCase_: str = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) UpperCamelCase_: List[str] = bart_model.generate(UpperCamelCase_ ).numpy() with self.assertRaises(UpperCamelCase_ ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(UpperCamelCase_ , foo='bar' )
57
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed 
__UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
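A minimal round-trip sketch of the offload utilities the tests above exercise (same call signatures as in the tests; the tensor shapes here are arbitrary):

import torch
from tempfile import TemporaryDirectory
from accelerate.utils import OffloadedWeightsLoader, offload_state_dict

state_dict = {"linear.weight": torch.randn(4, 3), "linear.bias": torch.randn(4)}
with TemporaryDirectory() as tmp_dir:
    offload_state_dict(tmp_dir, state_dict)  # one .dat file per tensor plus index.json
    loader = OffloadedWeightsLoader(save_folder=tmp_dir)
    for key, tensor in state_dict.items():
        assert torch.equal(tensor, loader[key])  # reloaded lazily from disk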
77
0
'''simple docstring'''


def check_bouncy(n: int) -> bool:
    '''simple docstring'''
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f'''{solution(99)}''')
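Quick checks for the two helpers above: 409 increases then decreases, so it is bouncy, while monotone numbers are not; the first point at which at least half of all integers are bouncy is 538 (Project Euler 112).

assert check_bouncy(409) is True
assert check_bouncy(123) is False  # strictly increasing digits
assert check_bouncy(321) is False  # strictly decreasing digits
print(solution(50))                # 538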
267
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Dict ={ 'configuration_xmod': [ 'XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XmodConfig', 'XmodOnnxConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] =[ 'XMOD_PRETRAINED_MODEL_ARCHIVE_LIST', 'XmodForCausalLM', 'XmodForMaskedLM', 'XmodForMultipleChoice', 'XmodForQuestionAnswering', 'XmodForSequenceClassification', 'XmodForTokenClassification', 'XmodModel', 'XmodPreTrainedModel', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
434
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase ( lowerCAmelCase__ ): '''simple docstring''' SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer'] SCREAMING_SNAKE_CASE = 'ChineseCLIPImageProcessor' SCREAMING_SNAKE_CASE = ('BertTokenizer', 'BertTokenizerFast') def __init__(self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , UpperCamelCase_ , ) UpperCAmelCase__ : Union[str, Any] = kwargs.pop("""feature_extractor""" ) UpperCAmelCase__ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(UpperCamelCase_ , UpperCamelCase_ ) UpperCAmelCase__ : List[str] = self.image_processor def __call__(self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: UpperCAmelCase__ : Dict = self.tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) if images is not None: UpperCAmelCase__ : Any = self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ ) if text is not None and images is not None: UpperCAmelCase__ : Any = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase_ ) , tensor_type=UpperCamelCase_ ) def _a (self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ ) def _a (self , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ ) @property def _a (self ): """simple docstring""" UpperCAmelCase__ : Any = self.tokenizer.model_input_names UpperCAmelCase__ : Tuple = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def _a (self ): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase_ , ) return self.image_processor_class
182
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = 
self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
0
def min_path_sum(grid: list) -> int:
    """simple docstring"""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    """simple docstring"""
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
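A worked example on the standard 3x3 grid: the cheapest path 1 -> 3 -> 1 -> 1 -> 1 costs 7. Note that the function accumulates sums in the grid it is given, so pass a copy if the input must survive.

grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert min_path_sum(grid) == 7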
491
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/trocr-base-handwritten""": (
        """https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
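A short usage sketch against the public API (TrOCRConfig ships with transformers; hidden_size resolves through attribute_map to d_model):

from transformers import TrOCRConfig

config = TrOCRConfig(decoder_layers=6, d_model=512)
print(config.model_type)   # trocr
print(config.hidden_size)  # 512, aliased to d_model via attribute_map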
79
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = 
torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
0
def bubble_sort(list_data: list, length: int = 0) -> list:
    '''simple docstring'''
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
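A quick demo of the recursive variant above; each pass bubbles the largest remaining element to the end, and the early return on a swap-free pass gives an O(n) best case (O(n^2) worst case).

data = [5, 1, 4, 2, 8]
assert bubble_sort(list(data)) == [1, 2, 4, 5, 8]
assert bubble_sort([]) == []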
631
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + 
chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
77
0
import logging import random import ray from transformers import RagConfig, RagRetriever, RagTokenizer from transformers.models.rag.retrieval_rag import CustomHFIndex SCREAMING_SNAKE_CASE__ : int = logging.getLogger(__name__) class a_ : def __init__( self ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE_ = False def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict: """simple docstring""" if not self.initialized: SCREAMING_SNAKE_CASE_ = RagRetriever( UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , ) SCREAMING_SNAKE_CASE_ = True def A_( self ) -> Union[str, Any]: """simple docstring""" self.retriever.index.init_index() def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self.retriever._main_retrieve(UpperCamelCase_ , UpperCamelCase_ ) return doc_ids, retrieved_doc_embeds class a_ ( SCREAMING_SNAKE_CASE__ ): def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]: """simple docstring""" if index is not None and index.is_initialized() and len(UpperCamelCase_ ) > 0: raise ValueError( 'When using Ray for distributed fine-tuning, ' 'you\'ll need to provide the paths instead, ' 'as the dataset and the index are loaded ' 'separately. More info in examples/rag/use_own_knowledge_dataset.py ' ) super().__init__( UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , index=UpperCamelCase_ , init_retrieval=UpperCamelCase_ , ) SCREAMING_SNAKE_CASE_ = retrieval_workers if len(self.retrieval_workers ) > 0: ray.get( [ worker.create_rag_retriever.remote(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for worker in self.retrieval_workers ] ) def A_( self ) -> Dict: """simple docstring""" logger.info('initializing retrieval' ) if len(self.retrieval_workers ) > 0: ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] ) else: # Non-distributed training. Load index into this same process. self.index.init_index() def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" if len(self.retrieval_workers ) > 0: # Select a random retrieval actor. 
SCREAMING_SNAKE_CASE_ = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )] SCREAMING_SNAKE_CASE_ = ray.get(random_worker.retrieve.remote(UpperCamelCase_ , UpperCamelCase_ ) ) else: SCREAMING_SNAKE_CASE_ = self._main_retrieve(UpperCamelCase_ , UpperCamelCase_ ) return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase_ ) @classmethod def A_( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" return super(UpperCamelCase_ , cls ).get_tokenizers(UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) @classmethod def A_( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ = kwargs.pop('config' , UpperCamelCase_ ) or RagConfig.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ ) SCREAMING_SNAKE_CASE_ = RagTokenizer.from_pretrained(UpperCamelCase_ , config=UpperCamelCase_ ) SCREAMING_SNAKE_CASE_ = rag_tokenizer.question_encoder SCREAMING_SNAKE_CASE_ = rag_tokenizer.generator if indexed_dataset is not None: SCREAMING_SNAKE_CASE_ = "custom" SCREAMING_SNAKE_CASE_ = CustomHFIndex(config.retrieval_vector_size , UpperCamelCase_ ) else: SCREAMING_SNAKE_CASE_ = cls._build_index(UpperCamelCase_ ) return cls( UpperCamelCase_ , question_encoder_tokenizer=UpperCamelCase_ , generator_tokenizer=UpperCamelCase_ , retrieval_workers=UpperCamelCase_ , index=UpperCamelCase_ , )
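A minimal sketch, not from the source, of how the two classes above are meant to be wired together. The names RayRetriever and RagRayDistributedRetriever stand in for the two (identically mangled) classes in the sample; the worker count and model id are illustrative assumptions.

import ray

ray.init()

# Each retrieval worker is a Ray actor wrapping the helper class above.
RemoteRetriever = ray.remote(RayRetriever)
workers = [RemoteRetriever.remote() for _ in range(4)]

# The retriever fans retrieve() calls out to a randomly chosen worker.
retriever = RagRayDistributedRetriever.from_pretrained("facebook/rag-token-nq", workers)
retriever.init_retrieval()  # initializes the index on every worker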
205
"""simple docstring""" import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , UpperCamelCase_ : List[Any]="</s>" , UpperCamelCase_ : Tuple="<unk>" , UpperCamelCase_ : List[str]="<pad>" , UpperCamelCase_ : Union[str, Any]=125 , UpperCamelCase_ : Dict=None , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" if extra_ids > 0 and additional_special_tokens is None: __UpperCAmelCase : int = [F"<extra_id_{i}>" for i in range(UpperCamelCase_)] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens __UpperCAmelCase : Dict = len(set(filter(lambda UpperCamelCase_: bool("extra_id" in str(UpperCamelCase_)) , UpperCamelCase_))) if extra_tokens != extra_ids: raise ValueError( F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are" " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the" " extra_ids tokens") __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else pad_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : List[str] = extra_ids __UpperCAmelCase : int = 2**8 # utf is 8 bits # define special tokens dict __UpperCAmelCase : Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } __UpperCAmelCase : Any = len(self.special_tokens_encoder) __UpperCAmelCase : List[Any] = len(UpperCamelCase_) for i, token in enumerate(UpperCamelCase_): __UpperCAmelCase : Union[str, Any] = self.vocab_size + i - n __UpperCAmelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def a_ ( self : List[Any]): """simple docstring""" return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def a_ ( self : List[str] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_)) + [1] return ([0] * len(UpperCamelCase_)) + [1] + ([0] * len(UpperCamelCase_)) + [1] def a_ ( self : Optional[Any] , UpperCamelCase_ : List[int]): """simple docstring""" if len(UpperCamelCase_) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( F"This sequence already has {self.eos_token}. 
In future versions this behavior may lead to duplicated" " eos tokens being added.") return token_ids else: return token_ids + [self.eos_token_id] def a_ ( self : Dict , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos) * [0] return len(token_ids_a + eos + token_ids_a + eos) * [0] def a_ ( self : Optional[int] , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None): """simple docstring""" __UpperCAmelCase : Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_) if token_ids_a is None: return token_ids_a else: __UpperCAmelCase : List[Any] = self._add_eos_if_not_present(UpperCamelCase_) return token_ids_a + token_ids_a def a_ ( self : List[str] , UpperCamelCase_ : str): """simple docstring""" __UpperCAmelCase : Any = [chr(UpperCamelCase_) for i in text.encode("utf-8")] return tokens def a_ ( self : Tuple , UpperCamelCase_ : List[Any]): """simple docstring""" if token in self.special_tokens_encoder: __UpperCAmelCase : Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: __UpperCAmelCase : int = self.added_tokens_encoder[token] elif len(UpperCamelCase_) != 1: __UpperCAmelCase : Optional[Any] = self.unk_token_id else: __UpperCAmelCase : Any = ord(UpperCamelCase_) + self._num_special_tokens return token_id def a_ ( self : Any , UpperCamelCase_ : List[str]): """simple docstring""" if index in self.special_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[index] else: __UpperCAmelCase : List[str] = chr(index - self._num_special_tokens) return token def a_ ( self : Dict , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : str = b"" for token in tokens: if token in self.special_tokens_decoder: __UpperCAmelCase : Tuple = self.special_tokens_decoder[token].encode("utf-8") elif token in self.added_tokens_decoder: __UpperCAmelCase : Any = self.special_tokens_decoder[token].encode("utf-8") elif token in self.special_tokens_encoder: __UpperCAmelCase : Optional[int] = token.encode("utf-8") elif token in self.added_tokens_encoder: __UpperCAmelCase : Optional[Any] = token.encode("utf-8") else: __UpperCAmelCase : Any = bytes([ord(UpperCamelCase_)]) bstring += tok_string __UpperCAmelCase : List[Any] = bstring.decode("utf-8" , errors="ignore") return string def a_ ( self : Optional[int] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" return ()
77
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) _a = { "configuration_mobilebert": [ "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertOnnxConfig", ], "tokenization_mobilebert": ["MobileBertTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = ["MobileBertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", "MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilebert import ( MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertOnnxConfig, ) from .tokenization_mobilebert import MobileBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mobilebert_fast import MobileBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, TFMobileBertPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
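The same lazy-import pattern recurs in the samples below. As a rough sketch of the mechanism (a simplified stand-in, not transformers' actual _LazyModule), attribute access is what triggers the real import, so importing the package stays cheap until a symbol is used:

import importlib


class LazyModule:
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        # invert {submodule: [symbols]} into {symbol: submodule}
        self._class_to_module = {
            sym: mod for mod, symbols in import_structure.items() for sym in symbols
        }

    def __getattr__(self, attr: str):
        # import the submodule only when one of its symbols is first accessed
        module = importlib.import_module("." + self._class_to_module[attr], self._name)
        return getattr(module, attr)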
481
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a__ ( unittest.TestCase ): def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : Union[str, Any] = embeddings_size __UpperCAmelCase : Dict = hidden_sizes __UpperCAmelCase : Dict = depths __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : str = num_labels __UpperCAmelCase : Optional[int] = scope __UpperCAmelCase : Dict = len(UpperCamelCase_) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) __UpperCAmelCase : Dict = self.get_config() return config, pixel_values def a_ ( self : Dict): """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]): """simple docstring""" __UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_) __UpperCAmelCase : Dict = model(UpperCamelCase_) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : List[Any] = self.num_labels __UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_) __UpperCAmelCase : str = model(UpperCamelCase_) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_flax class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () lowercase_ = False lowercase_ = False lowercase_ = False def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = FlaxRegNetModelTester(self) __UpperCAmelCase : str = 
ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_) def a_ ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def a_ ( self : Tuple): """simple docstring""" return def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase_) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_) @unittest.skip(reason="RegNet does not use inputs_embeds") def a_ ( self : Union[str, Any]): """simple docstring""" pass @unittest.skip(reason="RegNet does not support input and output embeddings") def a_ ( self : Optional[int]): """simple docstring""" pass def a_ ( self : str): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : int = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[int] = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Dict = ["pixel_values"] self.assertListEqual(arg_names[:1] , UpperCamelCase_) def a_ ( self : int): """simple docstring""" def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]): __UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : str = self.model_tester.num_stages self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1) __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[str] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Optional[int] = True check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): __UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_) @jax.jit def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]): return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_) with self.subTest("JIT Enabled"): __UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): 
__UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple() self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_)) for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_): self.assertEqual(jitted_output.shape , output.shape) def _UpperCamelCase ( ) -> Any: """simple docstring""" __UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_flax class a__ ( unittest.TestCase ): @cached_property def a_ ( self : Optional[int]): """simple docstring""" return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None @slow def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040") __UpperCAmelCase : Dict = self.default_image_processor __UpperCAmelCase : str = prepare_img() __UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np") __UpperCAmelCase : Dict = model(**UpperCamelCase_) # verify the logits __UpperCAmelCase : Dict = (1, 1000) self.assertEqual(outputs.logits.shape , UpperCamelCase_) __UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836]) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
77
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
635
"""simple docstring""" from scipy.stats import spearmanr import datasets A = """ The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic relationship. Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally distributed. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not entirely reliable but are probably reasonable for datasets larger than 500 or so. """ A = """ Args: predictions (`List[float]`): Predicted labels, as returned by a model. references (`List[float]`): Ground truth labels. return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns only the spearmanr score. Defaults to `False`. Returns: spearmanr (`float`): Spearman correlation coefficient. p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input. Examples: Example 1: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4]) >>> print(results) {'spearmanr': -0.7} Example 2: >>> spearmanr_metric = datasets.load_metric(\"spearmanr\") >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], ... predictions=[10, 9, 2.5, 6, 4], ... return_pvalue=True) >>> print(results['spearmanr']) -0.7 >>> print(round(results['spearmanr_pvalue'], 2)) 0.19 """ A = r"""\ @book{kokoska2000crc, title={CRC standard probability and statistics tables and formulae}, author={Kokoska, Stephen and Zwillinger, Daniel}, year={2000}, publisher={Crc Press} } @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Ant{\^o}nio H. 
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class a__ ( datasets.Metric ): def a_ ( self : Any): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("float"), "references": datasets.Value("float"), }) , reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"] , ) def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : int=False): """simple docstring""" __UpperCAmelCase : List[str] = spearmanr(UpperCamelCase_ , UpperCamelCase_) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
77
0
import argparse
import os

from accelerate.test_utils import execute_subprocess_async


def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
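A short, hypothetical usage sketch: wiring the parser above into a parent CLI and invoking the command programmatically. The config path is an illustrative placeholder.

parser = argparse.ArgumentParser("accelerate")
subparsers = parser.add_subparsers()
test_command_parser(subparsers)

# set_defaults(func=test_command) makes args.func dispatch to the handler
args = parser.parse_args(["test", "--config_file", "default_config.yaml"])
args.func(args)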
408
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging A = logging.get_logger(__name__) A = {"""vocab_file""": """spiece.model"""} A = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } A = {"""bert_for_seq_generation""": 512} class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = [] lowercase_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : List[str]="<s>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : List[Any]="<::::>" , UpperCamelCase_ : Optional[Dict[str, Any]] = None , **UpperCamelCase_ : List[Any] , ): """simple docstring""" __UpperCAmelCase : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __UpperCAmelCase : Dict = vocab_file __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def a_ ( self : List[str]): """simple docstring""" return self.sp_model.get_piece_size() def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): """simple docstring""" __UpperCAmelCase : Optional[int] = self.__dict__.copy() __UpperCAmelCase : List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def a_ ( self : Any , UpperCamelCase_ : str): """simple docstring""" return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def a_ ( self : Optional[Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(UpperCamelCase_) def a_ ( self : Tuple , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : int = self.sp_model.IdToPiece(UpperCamelCase_) return token def a_ ( self : Dict , UpperCamelCase_ : Optional[Any]): """simple docstring""" __UpperCAmelCase : int = [] __UpperCAmelCase : Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token __UpperCAmelCase : List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def a_ ( self : List[str] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should 
be a directory") return __UpperCAmelCase : Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: __UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
77
0
import warnings from typing import Dict, List, Optional, Tuple from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging A_ : Optional[Any] = logging.get_logger(__name__) class _lowerCAmelCase( UpperCAmelCase_ ): """simple docstring""" a : Optional[int] =['''input_ids''', '''attention_mask'''] def __init__( self , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase=1_2_5 , _lowerCamelCase=None , **_lowerCamelCase , ): if extra_ids > 0 and additional_special_tokens is None: UpperCamelCase_: int = [f'''<extra_id_{i}>''' for i in range(UpperCamelCase_ )] elif extra_ids > 0 and additional_special_tokens is not None: # Check that we have the right number of extra_id special tokens UpperCamelCase_: Dict = len(set(filter(lambda _lowerCamelCase : bool('extra_id' in str(UpperCamelCase_ ) ) , UpperCamelCase_ ) ) ) if extra_tokens != extra_ids: raise ValueError( f'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are''' ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the' ' extra_ids tokens' ) UpperCamelCase_: List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else pad_token UpperCamelCase_: List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else eos_token UpperCamelCase_: Optional[int] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else unk_token super().__init__( eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , extra_ids=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) UpperCamelCase_: List[str] = extra_ids UpperCamelCase_: int = 2**8 # utf is 8 bits # define special tokens dict UpperCamelCase_: Dict[int, str] = { self.pad_token: 0, self.eos_token: 1, self.unk_token: 2, } UpperCamelCase_: Any = len(self.special_tokens_encoder ) UpperCamelCase_: List[Any] = len(UpperCamelCase_ ) for i, token in enumerate(UpperCamelCase_ ): UpperCamelCase_: Union[str, Any] = self.vocab_size + i - n UpperCamelCase_: Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()} @property def _a ( self ): return self._utf_vocab_size + self._num_special_tokens + self._extra_ids def _a ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) # normal case: some special tokens if token_ids_a is None: return ([0] * len(UpperCamelCase_ )) + [1] return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def _a ( self , _lowerCamelCase ): if len(UpperCamelCase_ ) > 0 and token_ids[-1] == self.eos_token_id: warnings.warn( f'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated''' ' eos tokens being added.' 
) return token_ids else: return token_ids + [self.eos_token_id] def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): UpperCamelCase_: Dict = [self.eos_token_id] if token_ids_a is None: return len(token_ids_a + eos ) * [0] return len(token_ids_a + eos + token_ids_a + eos ) * [0] def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): UpperCamelCase_: Optional[Any] = self._add_eos_if_not_present(UpperCamelCase_ ) if token_ids_a is None: return token_ids_a else: UpperCamelCase_: List[Any] = self._add_eos_if_not_present(UpperCamelCase_ ) return token_ids_a + token_ids_a def _a ( self , _lowerCamelCase ): UpperCamelCase_: Any = [chr(UpperCamelCase_ ) for i in text.encode('utf-8' )] return tokens def _a ( self , _lowerCamelCase ): if token in self.special_tokens_encoder: UpperCamelCase_: Any = self.special_tokens_encoder[token] elif token in self.added_tokens_encoder: UpperCamelCase_: int = self.added_tokens_encoder[token] elif len(UpperCamelCase_ ) != 1: UpperCamelCase_: Optional[Any] = self.unk_token_id else: UpperCamelCase_: Any = ord(UpperCamelCase_ ) + self._num_special_tokens return token_id def _a ( self , _lowerCamelCase ): if index in self.special_tokens_decoder: UpperCamelCase_: Any = self.special_tokens_decoder[index] else: UpperCamelCase_: List[str] = chr(index - self._num_special_tokens ) return token def _a ( self , _lowerCamelCase ): UpperCamelCase_: str = b"" for token in tokens: if token in self.special_tokens_decoder: UpperCamelCase_: Tuple = self.special_tokens_decoder[token].encode('utf-8' ) elif token in self.added_tokens_decoder: UpperCamelCase_: Any = self.special_tokens_decoder[token].encode('utf-8' ) elif token in self.special_tokens_encoder: UpperCamelCase_: Optional[int] = token.encode('utf-8' ) elif token in self.added_tokens_encoder: UpperCamelCase_: Optional[Any] = token.encode('utf-8' ) else: UpperCamelCase_: Any = bytes([ord(UpperCamelCase_ )] ) bstring += tok_string UpperCamelCase_: List[Any] = bstring.decode('utf-8' , errors='ignore' ) return string def _a ( self , _lowerCamelCase , _lowerCamelCase = None ): return ()
57
"""simple docstring""" import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A = """true""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , UpperCamelCase=16 ) -> Tuple: """simple docstring""" set_seed(42 ) __UpperCAmelCase : Dict = RegressionModel() __UpperCAmelCase : Optional[Any] = deepcopy(UpperCamelCase ) __UpperCAmelCase : Any = RegressionDataset(length=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase , batch_size=UpperCamelCase ) model.to(accelerator.device ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return model, ddp_model, dataloader def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=False ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" ) __UpperCAmelCase : Dict = load_dataset("glue" , "mrpc" , split="validation" ) def tokenize_function(UpperCamelCase ): __UpperCAmelCase : Dict = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs with accelerator.main_process_first(): __UpperCAmelCase : str = dataset.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) __UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(UpperCamelCase ): if use_longest: return tokenizer.pad(UpperCamelCase , padding="longest" , return_tensors="pt" ) return tokenizer.pad(UpperCamelCase , padding="max_length" , max_length=128 , return_tensors="pt" ) return DataLoader(UpperCamelCase , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=16 ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: """simple docstring""" __UpperCAmelCase : List[Any] = Accelerator(dispatch_batches=UpperCamelCase , split_batches=UpperCamelCase ) __UpperCAmelCase : int = get_dataloader(UpperCamelCase , not dispatch_batches ) __UpperCAmelCase : Any = AutoModelForSequenceClassification.from_pretrained( "hf-internal-testing/mrpc-bert-base-cased" , return_dict=UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase , UpperCamelCase ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: """simple docstring""" __UpperCAmelCase : Dict = [] for batch in dataloader: __UpperCAmelCase , __UpperCAmelCase : int = batch.values() with torch.no_grad(): __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : List[str] = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = [], [] for logit, targ in logits_and_targets: logits.append(UpperCamelCase ) targs.append(UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Optional[Any] = torch.cat(UpperCamelCase ), torch.cat(UpperCamelCase ) return logits, targs def _UpperCamelCase ( UpperCamelCase , UpperCamelCase=82 , 
UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=16 ) -> int: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = get_basic_setup(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = generate_predictions(UpperCamelCase , UpperCamelCase , UpperCamelCase ) assert ( len(UpperCamelCase ) == num_samples ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(UpperCamelCase )}" def _UpperCamelCase ( UpperCamelCase = False , UpperCamelCase = False ) -> List[str]: """simple docstring""" __UpperCAmelCase : List[str] = evaluate.load("glue" , "mrpc" ) __UpperCAmelCase , __UpperCAmelCase : List[Any] = get_mrpc_setup(UpperCamelCase , UpperCamelCase ) # First do baseline __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = setup["no"] model.to(UpperCamelCase ) model.eval() for batch in dataloader: batch.to(UpperCamelCase ) with torch.inference_mode(): __UpperCAmelCase : List[str] = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=UpperCamelCase , references=batch["labels"] ) __UpperCAmelCase : str = metric.compute() # Then do distributed __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = setup["ddp"] model.eval() for batch in dataloader: with torch.inference_mode(): __UpperCAmelCase : Any = model(**UpperCamelCase ) __UpperCAmelCase : str = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase : Union[str, Any] = batch["labels"] __UpperCAmelCase , __UpperCAmelCase : Any = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=UpperCamelCase , references=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def _UpperCamelCase ( ) -> List[Any]: """simple docstring""" __UpperCAmelCase : Dict = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("**Testing gather_for_metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(UpperCamelCase , UpperCamelCase ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test torch metrics**" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: __UpperCAmelCase : Union[str, Any] = Accelerator(split_batches=UpperCamelCase , dispatch_batches=UpperCamelCase ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(UpperCamelCase , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("**Test last batch is not dropped when perfectly divisible**" ) __UpperCAmelCase : Any = Accelerator() test_torch_metrics(UpperCamelCase , 512 ) accelerator.state._reset_state() def 
_UpperCamelCase ( UpperCamelCase ) -> Optional[Any]: """simple docstring""" # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
77
0
def encrypt(input_string: str, key: int) -> str:
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
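A quick round-trip check for the rail-fence functions above; the message and keys are arbitrary examples.

# decrypt should invert encrypt for any valid key shorter than the message
message = "Hello, World!"
for key in range(2, 6):
    assert decrypt(encrypt(message, key), key) == message
print(encrypt(message, 4))  # zigzag reading of the message over 4 rails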
267
"""simple docstring""" import math def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0 , UpperCamelCase = 0 ) -> list: """simple docstring""" __UpperCAmelCase : Union[str, Any] = end or len(UpperCamelCase ) for i in range(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = i __UpperCAmelCase : Any = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __UpperCAmelCase : Dict = array[temp_index - 1] temp_index -= 1 __UpperCAmelCase : str = temp_index_value return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> None: # Max Heap """simple docstring""" __UpperCAmelCase : Optional[Any] = index __UpperCAmelCase : List[str] = 2 * index + 1 # Left Node __UpperCAmelCase : Union[str, Any] = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __UpperCAmelCase : Tuple = left_index if right_index < heap_size and array[largest] < array[right_index]: __UpperCAmelCase : int = right_index if largest != index: __UpperCAmelCase , __UpperCAmelCase : List[str] = array[largest], array[index] heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" __UpperCAmelCase : List[Any] = len(UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __UpperCAmelCase , __UpperCAmelCase : int = array[0], array[i] heapify(UpperCamelCase , 0 , UpperCamelCase ) return array def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Optional[Any] = low __UpperCAmelCase : List[str] = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __UpperCAmelCase , __UpperCAmelCase : Optional[int] = array[j], array[i] i += 1 def _UpperCamelCase ( UpperCamelCase ) -> list: """simple docstring""" if len(UpperCamelCase ) == 0: return array __UpperCAmelCase : Optional[int] = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) ) __UpperCAmelCase : List[Any] = 16 return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> list: """simple docstring""" while end - start > size_threshold: if max_depth == 0: return heap_sort(UpperCamelCase ) max_depth -= 1 __UpperCAmelCase : List[Any] = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __UpperCAmelCase : Union[str, Any] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = p return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() A = input("""Enter numbers separated by a comma : """).strip() A = [float(item) for item in user_input.split(""",""")] print(sort(unsorted))
77
0
"""simple docstring""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ : Dict =logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : List[str] ='https://openaipublic.azureedge.net/jukebox/models/' SCREAMING_SNAKE_CASE__ : int ={ 'jukebox-1b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '1b_lyrics/prior_level_2.pth.tar', ], 'jukebox-5b-lyrics': [ '5b/vqvae.pth.tar', '5b/prior_level_0.pth.tar', '5b/prior_level_1.pth.tar', '5b_lyrics/prior_level_2.pth.tar', ], } def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]: if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10: _lowerCamelCase : str = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' ) elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10: _lowerCamelCase : Tuple = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' ) elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10: _lowerCamelCase : List[Any] = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' ) elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10: _lowerCamelCase : List[Any] = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' ) if "conditioner_blocks.0." in key: _lowerCamelCase : Any = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' ) if "prime_prior" in key: _lowerCamelCase : Dict = key.replace('''prime_prior''' , '''encoder''' ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: _lowerCamelCase : Dict = key.replace('''.emb.''' , '''.''' ) if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook return key.replace('''.k''' , '''.codebook''' ) if "y_emb." in key: return key.replace('''y_emb.''' , '''metadata_embedding.''' ) if "x_emb.emb." 
in key: _lowerCamelCase : str = key.replace('''0.x_emb.emb''' , '''embed_tokens''' ) if "prime_state_ln" in key: return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' ) if ".ln" in key: return key.replace('''.ln''' , '''.layer_norm''' ) if "_ln" in key: return key.replace('''_ln''' , '''_layer_norm''' ) if "prime_state_proj" in key: return key.replace('''prime_state_proj''' , '''encoder.proj_in''' ) if "prime_x_out" in key: return key.replace('''prime_x_out''' , '''encoder.lm_head''' ) if "prior.x_out" in key: return key.replace('''x_out''' , '''fc_proj_out''' ) if "x_emb" in key: return key.replace('''x_emb''' , '''embed_tokens''' ) return key def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[int]: _lowerCamelCase : Union[str, Any] = {} import re _lowerCamelCase : Tuple = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) _lowerCamelCase : Union[str, Any] = re.compile( R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) _lowerCamelCase : Optional[Any] = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) _lowerCamelCase : int = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' ) _lowerCamelCase : Optional[int] = re.compile( R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) _lowerCamelCase : Union[str, Any] = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' ) _lowerCamelCase : Any = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' ) _lowerCamelCase : List[Any] = re.compile( R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' ) _lowerCamelCase : Any = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : str = re_encoder_block_conv_in.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Dict = regex_match.groups() _lowerCamelCase : List[str] = int(groups[2] ) * 2 + int(groups[3] ) _lowerCamelCase : List[str] = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' _lowerCamelCase : Optional[int] = re_encoder_block_conv_in.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_encoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Tuple = re_encoder_block_resnet.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Tuple = regex_match.groups() _lowerCamelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] ) _lowerCamelCase : List[str] = {"1": 1, "3": 2}[groups[-2]] _lowerCamelCase : Union[str, Any] = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' _lowerCamelCase : Dict = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' _lowerCamelCase : Optional[int] = prefix + resnet_block _lowerCamelCase : Union[str, Any] = re_encoder_block_resnet.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_encoder_block_proj_out.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Optional[int] = re_encoder_block_proj_out.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Optional[Any] = regex_match.groups() _lowerCamelCase : Tuple = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' _lowerCamelCase : Dict = 
re_encoder_block_proj_out.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Tuple = re_decoder_block_conv_out.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Dict = regex_match.groups() _lowerCamelCase : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCamelCase : Tuple = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' _lowerCamelCase : Any = re_decoder_block_conv_out.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_decoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Optional[Any] = re_decoder_block_resnet.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Optional[Any] = regex_match.groups() _lowerCamelCase : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2 _lowerCamelCase : Optional[int] = {"1": 1, "3": 2}[groups[-2]] _lowerCamelCase : Union[str, Any] = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' _lowerCamelCase : Optional[int] = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' _lowerCamelCase : Optional[Any] = prefix + resnet_block _lowerCamelCase : Union[str, Any] = re_decoder_block_resnet.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_decoder_block_proj_in.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Union[str, Any] = re_decoder_block_proj_in.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : List[Any] = regex_match.groups() _lowerCamelCase : Optional[int] = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' _lowerCamelCase : Union[str, Any] = re_decoder_block_proj_in.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : str = re_prior_cond_conv_out.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : List[Any] = regex_match.groups() _lowerCamelCase : Optional[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCamelCase : Optional[Any] = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' _lowerCamelCase : Union[str, Any] = re_prior_cond_conv_out.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_prior_cond_resnet.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Optional[Any] = re_prior_cond_resnet.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : List[Any] = regex_match.groups() _lowerCamelCase : List[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2 _lowerCamelCase : Optional[int] = {"1": 1, "3": 2}[groups[-2]] _lowerCamelCase : List[Any] = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' _lowerCamelCase : List[Any] = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' _lowerCamelCase : List[Any] = prefix + resnet_block _lowerCamelCase : List[str] = re_prior_cond_resnet.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) elif re_prior_cond_proj_in.fullmatch(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Any = re_prior_cond_proj_in.match(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Dict = regex_match.groups() _lowerCamelCase : Tuple = F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' _lowerCamelCase : Dict = re_prior_cond_proj_in.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # keep original key else: _lowerCamelCase : int = original_key _lowerCamelCase : List[str] = replace_key(SCREAMING_SNAKE_CASE_ ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is 
None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: _lowerCamelCase : Optional[Any] = model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) _lowerCamelCase : Dict = original_key _lowerCamelCase : str = original_key _lowerCamelCase : Dict = value return new_dict @torch.no_grad() def UpperCamelCase ( SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) ->Optional[int]: for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' ): _lowerCamelCase : Optional[int] = requests.get(F'''{PREFIX}{file}''' , allow_redirects=SCREAMING_SNAKE_CASE_ ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=SCREAMING_SNAKE_CASE_ ) open(F'''{pytorch_dump_folder_path}/{file.split("/" )[-1]}''' , '''wb''' ).write(r.content ) _lowerCamelCase : List[Any] = MODEL_MAPPING[model_name.split('''/''' )[-1]] _lowerCamelCase : Optional[Any] = JukeboxConfig.from_pretrained(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Union[str, Any] = JukeboxModel(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Union[str, Any] = [] _lowerCamelCase : Tuple = {} for i, dict_name in enumerate(SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Optional[int] = torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}''' )["model"] _lowerCamelCase : List[Any] = {} for k in old_dic.keys(): if k.endswith('''.b''' ): _lowerCamelCase : int = old_dic[k] elif k.endswith('''.w''' ): _lowerCamelCase : Optional[Any] = old_dic[k] elif "level_2" not in dict_name and "cond.model." in k: _lowerCamelCase : Union[str, Any] = old_dic[k] else: _lowerCamelCase : Any = old_dic[k] _lowerCamelCase : Union[str, Any] = "vqvae" if i == 0 else F'''priors.{3 - i}''' _lowerCamelCase : List[str] = fix_jukebox_keys(SCREAMING_SNAKE_CASE_ , model.state_dict() , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) weight_dict.append(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : Optional[Any] = weight_dict.pop(0 ) model.vqvae.load_state_dict(SCREAMING_SNAKE_CASE_ ) for i in range(len(SCREAMING_SNAKE_CASE_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , '''w''' ) as txtfile: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) return weight_dict if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Union[str, Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='jukebox-5b-lyrics', type=str, help='Name of the model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='jukebox-5b-lyrics-converted', type=str, help='Path to the output PyTorch model directory.', ) SCREAMING_SNAKE_CASE__ : List[str] =parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
434
"""simple docstring""" import numpy as np from PIL import Image def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : str = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = 0 # compute the shape of the output matrix __UpperCAmelCase : Optional[int] = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape maxpool_shape __UpperCAmelCase : List[str] = np.zeros((maxpool_shape, maxpool_shape) ) while i < arr.shape[0]: if i + size > arr.shape[0]: # if the end of the matrix is reached, break break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the maximum of the pooling matrix __UpperCAmelCase : str = np.max(arr[i : i + size, j : j + size] ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : int = 0 __UpperCAmelCase : int = 0 return updated_arr def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> np.ndarray: """simple docstring""" __UpperCAmelCase : List[str] = np.array(UpperCamelCase ) if arr.shape[0] != arr.shape[1]: raise ValueError("The input array is not a square matrix" ) __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Any = 0 # compute the shape of the output matrix __UpperCAmelCase : Tuple = (arr.shape[0] - size) // stride + 1 # initialize the output matrix with zeros of shape avgpool_shape __UpperCAmelCase : str = np.zeros((avgpool_shape, avgpool_shape) ) while i < arr.shape[0]: # if the end of the matrix is reached, break if i + size > arr.shape[0]: break while j < arr.shape[1]: # if the end of the matrix is reached, break if j + size > arr.shape[1]: break # compute the average of the pooling matrix __UpperCAmelCase : Tuple = int(np.average(arr[i : i + size, j : j + size] ) ) # shift the pooling matrix by stride of column pixels j += stride mat_j += 1 # shift the pooling matrix by stride of row pixels i += stride mat_i += 1 # reset the column index to 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Optional[Any] = 0 return updated_arr # Main Function if __name__ == "__main__": from doctest import testmod testmod(name="""avgpooling""", verbose=True) # Loading the image A = Image.open("""path_to_image""") # Converting the image to numpy array and maxpooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show() # Converting the image to numpy array and averagepooling, displaying the result # Ensure that the image is a square matrix Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
77
0
"""simple docstring""" import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, ByTaTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): _A = """pt""" elif is_tf_available(): _A = """tf""" else: _A = """jax""" class lowerCamelCase ( lowerCAmelCase__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE = ByTaTokenizer SCREAMING_SNAKE_CASE = False def _a (self ): """simple docstring""" super().setUp() UpperCAmelCase__ : List[str] = ByTaTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def _a (self ): """simple docstring""" return ByTaTokenizer.from_pretrained("""google/byt5-small""" ) def _a (self , **_lowerCamelCase ): """simple docstring""" return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def _a (self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=20 , _lowerCamelCase=5 ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = [] for i in range(len(UpperCamelCase_ ) ): try: UpperCAmelCase__ : Optional[int] = tokenizer.decode([i] , clean_up_tokenization_spaces=UpperCamelCase_ ) except UnicodeDecodeError: pass toks.append((i, tok) ) UpperCAmelCase__ : List[str] = list(filter(lambda _lowerCamelCase : re.match(r"""^[ a-zA-Z]+$""" , t[1] ) , UpperCamelCase_ ) ) UpperCAmelCase__ : Dict = list(filter(lambda _lowerCamelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=UpperCamelCase_ ) , UpperCamelCase_ ) ) if max_length is not None and len(UpperCamelCase_ ) > max_length: UpperCAmelCase__ : Optional[int] = toks[:max_length] if min_length is not None and len(UpperCamelCase_ ) < min_length and len(UpperCamelCase_ ) > 0: while len(UpperCamelCase_ ) < min_length: UpperCAmelCase__ : Dict = toks + toks # toks_str = [t[1] for t in toks] UpperCAmelCase__ : Optional[int] = [t[0] for t in toks] # Ensure consistency UpperCAmelCase__ : List[str] = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ ) if " " not in output_txt and len(UpperCamelCase_ ) > 1: UpperCAmelCase__ : Optional[int] = ( tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=UpperCamelCase_ ) + " " + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=UpperCamelCase_ ) ) if with_prefix_space: UpperCAmelCase__ : Any = " " + output_txt UpperCAmelCase__ : str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) return output_txt, output_ids def _a (self ): """simple docstring""" UpperCAmelCase__ : Any = self.ta_base_tokenizer UpperCAmelCase__ : Any = tokenizer(["""hi</s>""", """I went to the gym</s>""", """</s>"""] ) UpperCAmelCase__ : List[str] = tokenizer(["""hi""", """I went to the gym""", """"""] ) self.assertListEqual(batch_with_eos_added["""input_ids"""] , batch_without_eos_added["""input_ids"""] ) def _a (self ): """simple docstring""" UpperCAmelCase__ : str = self.ta_base_tokenizer UpperCAmelCase__ : Tuple = "Unicode €." 
UpperCAmelCase__ : List[str] = tokenizer(UpperCamelCase_ ) UpperCAmelCase__ : Optional[int] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase_ ) # decoding UpperCAmelCase__ : Optional[Any] = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , """Unicode €.</s>""" ) UpperCAmelCase__ : Any = tokenizer("""e è é ê ë""" ) UpperCAmelCase__ : Any = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1] self.assertEqual(encoded["""input_ids"""] , UpperCamelCase_ ) # decoding UpperCAmelCase__ : str = tokenizer.decode(UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , """e è é ê ë</s>""" ) # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("""e è é ê ë""" ) ) , """e è é ê ë</s>""" ) def _a (self ): """simple docstring""" UpperCAmelCase__ : str = self.ta_base_tokenizer UpperCAmelCase__ : Optional[int] = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off UpperCAmelCase__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0] # fmt: on UpperCAmelCase__ : Any = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) if FRAMEWORK != "jax": UpperCAmelCase__ : str = list(batch.input_ids.numpy()[0] ) else: UpperCAmelCase__ : str = list(batch.input_ids.tolist()[0] ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertEqual((2, 37) , batch.input_ids.shape ) self.assertEqual((2, 37) , batch.attention_mask.shape ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = self.ta_base_tokenizer UpperCAmelCase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."] UpperCAmelCase__ : Optional[Any] = tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) # check if input_ids are returned and no decoder_input_ids self.assertIn("""input_ids""" , UpperCamelCase_ ) self.assertIn("""attention_mask""" , UpperCamelCase_ ) self.assertNotIn("""decoder_input_ids""" , UpperCamelCase_ ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase_ ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Any = self.ta_base_tokenizer UpperCAmelCase__ : Tuple = [ "Summary of the text.", "Another summary.", ] UpperCAmelCase__ : Union[str, Any] = tokenizer( text_target=UpperCamelCase_ , max_length=32 , padding="""max_length""" , truncation=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = self.ta_base_tokenizer UpperCAmelCase__ : Any = ["A long paragraph for summarization. </s>"] UpperCAmelCase__ : str = ["Summary of the text. 
</s>"] # fmt: off UpperCAmelCase__ : Optional[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1] UpperCAmelCase__ : Dict = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1] # fmt: on UpperCAmelCase__ : List[str] = tokenizer(UpperCamelCase_ , text_target=UpperCamelCase_ ) self.assertEqual(UpperCamelCase_ , batch["""input_ids"""][0] ) self.assertEqual(UpperCamelCase_ , batch["""labels"""][0] ) def _a (self ): """simple docstring""" UpperCAmelCase__ : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): self.assertNotEqual(tokenizer.model_max_length , 42 ) # Now let's start the test UpperCAmelCase__ : str = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase__ : List[Any] = tempfile.mkdtemp() UpperCAmelCase__ : int = " He is very happy, UNwant\u00E9d,running" UpperCAmelCase__ : str = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) UpperCAmelCase__ : Optional[Any] = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) shutil.rmtree(UpperCamelCase_ ) UpperCAmelCase__ : Tuple = self.get_tokenizers(model_max_length=42 ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): # Isolate this from the other tests because we save additional tokens/etc UpperCAmelCase__ : Union[str, Any] = tempfile.mkdtemp() UpperCAmelCase__ : Any = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["""bim""", """bambam"""] ) UpperCAmelCase__ : Optional[int] = tokenizer.additional_special_tokens additional_special_tokens.append("""new_additional_special_token""" ) tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} ) UpperCAmelCase__ : Optional[Any] = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) tokenizer.save_pretrained(UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(UpperCamelCase_ ) UpperCAmelCase__ : List[Any] = after_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) self.assertIn("""new_additional_special_token""" , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 42 ) UpperCAmelCase__ : List[Any] = tokenizer.__class__.from_pretrained(UpperCamelCase_ , model_max_length=43 ) self.assertEqual(tokenizer.model_max_length , 43 ) shutil.rmtree(UpperCamelCase_ ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file: UpperCAmelCase__ : int = 
json.load(UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file: UpperCAmelCase__ : Dict = json.load(UpperCamelCase_ ) UpperCAmelCase__ : Dict = [F"""<extra_id_{i}>""" for i in range(125 )] UpperCAmelCase__ : Dict = added_tokens_extra_ids + [ "an_additional_special_token" ] UpperCAmelCase__ : int = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(UpperCamelCase_ , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) with open(os.path.join(UpperCamelCase_ , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files UpperCAmelCase__ : List[Any] = tokenizer_class.from_pretrained( UpperCamelCase_ , ) self.assertIn( """an_additional_special_token""" , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( ["""an_additional_special_token"""] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["""an_additional_special_token"""] ) ) , ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained UpperCAmelCase__ : Any = added_tokens_extra_ids + [AddedToken("""a_new_additional_special_token""" , lstrip=UpperCamelCase_ )] UpperCAmelCase__ : Optional[Any] = tokenizer_class.from_pretrained( UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , ) self.assertIn("""a_new_additional_special_token""" , tokenizer.additional_special_tokens ) self.assertEqual( ["""a_new_additional_special_token"""] , tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["""a_new_additional_special_token"""] ) ) , ) def _a (self ): """simple docstring""" UpperCAmelCase__ : List[str] = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(UpperCamelCase_ ) UpperCAmelCase__ : Any = tokenizer_class.from_pretrained(UpperCamelCase_ ) self.assertTrue(tokenizer.decode([255] ) == """""" ) def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" pass def _a (self ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = self.get_tokenizers(fast=UpperCamelCase_ , do_lower_case=UpperCamelCase_ ) for tokenizer in tokenizers: with self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase__ : Optional[Any] = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"] UpperCAmelCase__ : Optional[Any] = tokenizer.convert_tokens_to_string(UpperCamelCase_ ) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = self.get_tokenizers() for tokenizer in tokenizers: with 
self.subTest(F"""{tokenizer.__class__.__name__}""" ): UpperCAmelCase__ : List[Any] = [ "bos_token", "eos_token", "unk_token", "sep_token", "pad_token", "cls_token", "mask_token", ] UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : str = tokenizer.convert_ids_to_tokens( UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ ) for attr in attributes_list: setattr(UpperCamelCase_ , attr + """_id""" , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + """_id""" ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , attr + """_id""" , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , UpperCamelCase_ ) , UpperCamelCase_ ) self.assertEqual(getattr(UpperCamelCase_ , attr + """_id""" ) , UpperCamelCase_ ) setattr(UpperCamelCase_ , """additional_special_tokens_ids""" , [] ) self.assertListEqual(getattr(UpperCamelCase_ , """additional_special_tokens""" ) , [] ) self.assertListEqual(getattr(UpperCamelCase_ , """additional_special_tokens_ids""" ) , [] ) setattr(UpperCamelCase_ , """additional_special_tokens_ids""" , [token_id_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , """additional_special_tokens""" ) , [token_to_test_setters] ) self.assertListEqual(getattr(UpperCamelCase_ , """additional_special_tokens_ids""" ) , [token_id_to_test_setters] )
182
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: A = None A = logging.get_logger(__name__) A = """▁""" A = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""} A = { """vocab_file""": {"""google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"""}, """tokenizer_file""": { """google/pegasus-xsum""": """https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json""" }, } A = { """google/pegasus-xsum""": 512, } class a__ ( __magic_name__ ): lowercase_ = VOCAB_FILES_NAMES lowercase_ = PRETRAINED_VOCAB_FILES_MAP lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ = PegasusTokenizer lowercase_ = ["input_ids", "attention_mask"] def __init__( self : str , UpperCamelCase_ : str=None , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : int="<pad>" , UpperCamelCase_ : Optional[Any]="</s>" , UpperCamelCase_ : Any="<unk>" , UpperCamelCase_ : Tuple="<mask_2>" , UpperCamelCase_ : Any="<mask_1>" , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : str=103 , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" __UpperCAmelCase : Optional[int] = offset if additional_special_tokens is not None: if not isinstance(UpperCamelCase_ , UpperCamelCase_): raise TypeError( F"additional_special_tokens should be of type {type(UpperCamelCase_)}, but is" F" {type(UpperCamelCase_)}") __UpperCAmelCase : Any = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ F"<unk_{i}>" for i in range(len(UpperCamelCase_) , self.offset - 1) ] if len(set(UpperCamelCase_)) != len(UpperCamelCase_): raise ValueError( "Please make sure that the provided additional_special_tokens do not contain an incorrectly" F" shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.") __UpperCAmelCase : str = additional_special_tokens_extended else: __UpperCAmelCase : Tuple = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [F"<unk_{i}>" for i in range(2 , self.offset)] super().__init__( UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , pad_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , mask_token_sent=UpperCamelCase_ , offset=UpperCamelCase_ , additional_special_tokens=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : List[str] = False if not self.vocab_file else True def a_ ( self : Union[str, Any] , UpperCamelCase_ : Optional[int]): """simple docstring""" __UpperCAmelCase : int = set(self.all_special_ids) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens) + 3)): raise ValueError( "There should be 3 special tokens: mask_token, pad_token, and eos_token +" F" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}") return [1 if x in all_special_ids else 0 for x in seq] def a_ ( self : Union[str, Any] , UpperCamelCase_ : List , UpperCamelCase_ : Optional[List] = None , UpperCamelCase_ : bool = False): """simple docstring""" if already_has_special_tokens: return self._special_token_mask(UpperCamelCase_) elif token_ids_a is None: return self._special_token_mask(UpperCamelCase_) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a) + [1] def a_ ( self : int , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=None): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None): """simple docstring""" if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer.") if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a directory") return __UpperCAmelCase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_): copyfile(self.vocab_file , UpperCamelCase_) return (out_vocab_file,)
77
0
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional

import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset

import transformers
from transformers import (
    AutoConfig,
    AutoFeatureExtractor,
    AutoModelForAudioClassification,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

UpperCAmelCase_ : str = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


def UpperCamelCase ( _A : Optional[Any] , _A : Any , _A : List[Any] = 16000 ) -> List[Any]:
    """simple docstring"""
    A__ = int(round(sample_rate * max_length ) )
    if len(_A ) <= sample_length:
        return wav
    A__ = randint(0 , len(_A ) - sample_length - 1 )
    return wav[random_offset : random_offset + sample_length]


@dataclass
class UpperCamelCase :
    lowerCAmelCase : Optional[int] = field(default=_UpperCAmelCase , metadata={"""help""": """Name of a dataset from the datasets package"""} )
    lowerCAmelCase : List[Any] = field(
        default=_UpperCAmelCase , metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
    lowerCAmelCase : Tuple = field(
        default=_UpperCAmelCase , metadata={"""help""": """A file containing the training audio paths and labels."""} )
    lowerCAmelCase : Optional[int] = field(
        default=_UpperCAmelCase , metadata={"""help""": """A file containing the validation audio paths and labels."""} )
    lowerCAmelCase : List[str] = field(
        default="""train""" , metadata={
            """help""": """The name of the training data set split to use (via the datasets library). Defaults to 'train'"""
        } , )
    lowerCAmelCase : List[Any] = field(
        default="""validation""" , metadata={
            """help""": (
                """The name of the evaluation data set split to use (via the datasets library). Defaults to 'validation'"""
            )
        } , )
    lowerCAmelCase : Dict = field(
        default="""audio""" , metadata={"""help""": """The name of the dataset column containing the audio data. Defaults to 'audio'"""} , )
    lowerCAmelCase : List[str] = field(
        default="""label""" , metadata={"""help""": """The name of the dataset column containing the labels. 
Defaults to 'label'"""} ) lowerCAmelCase : Dict = field( default=_UpperCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) lowerCAmelCase : str = field( default=_UpperCAmelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) lowerCAmelCase : int = field( default=20 , metadata={"""help""": """Audio clips will be randomly cut to this length during training if the value is set."""} , ) @dataclass class UpperCamelCase : lowerCAmelCase : str = field( default="""facebook/wav2vec2-base""" , metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} , ) lowerCAmelCase : int = field( default=_UpperCAmelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) lowerCAmelCase : Tuple = field( default=_UpperCAmelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from the Hub"""} ) lowerCAmelCase : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) lowerCAmelCase : Dict = field( default=_UpperCAmelCase , metadata={"""help""": """Name or path of preprocessor config."""} ) lowerCAmelCase : List[str] = field( default=_UpperCAmelCase , metadata={"""help""": """Whether to freeze the feature encoder layers of the model."""} ) lowerCAmelCase : Tuple = field( default=_UpperCAmelCase , metadata={"""help""": """Whether to generate an attention mask in the feature extractor."""} ) lowerCAmelCase : str = field( default=_UpperCAmelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) lowerCAmelCase : str = field( default=_UpperCAmelCase , metadata={"""help""": """Whether to freeze the feature extractor layers of the model."""} ) lowerCAmelCase : List[str] = field( default=_UpperCAmelCase , metadata={"""help""": """Will enable to load a pretrained model whose head dimensions are different."""} , ) def __A ( self ): if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( "The argument `--freeze_feature_extractor` is deprecated and " "will be removed in a future version. Use `--freeze_feature_encoder`" "instead. Setting `freeze_feature_encoder==True`." , UpperCamelCase_ , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( "The argument `--freeze_feature_extractor` is deprecated and " "should not be used in combination with `--freeze_feature_encoder`." "Only make use of `--freeze_feature_encoder`." ) def UpperCamelCase ( )-> Any: """simple docstring""" A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. A__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: A__ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_audio_classification" , _A , _A ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() A__ = training_args.get_process_log_level() logger.setLevel(_A ) transformers.utils.logging.set_verbosity(_A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(f"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. A__ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: A__ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to train from scratch." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset and prepare it for the audio classification task. A__ = DatasetDict() A__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) A__ = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ "Make sure to set `--audio_column_name` to the correct audio column - one of " f"""{", ".join(raw_datasets["train"].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( f"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """ "Make sure to set `--label_column_name` to the correct text column - one of " f"""{", ".join(raw_datasets["train"].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy A__ = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
A__ = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) A__ = feature_extractor.model_input_names[0] def train_transforms(_A : int ): A__ = [] for audio in batch[data_args.audio_column_name]: A__ = random_subsample( audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(_A ) A__ = feature_extractor(_A , sampling_rate=feature_extractor.sampling_rate ) A__ = {model_input_name: inputs.get(_A )} A__ = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(_A : Any ): A__ = [audio["array"] for audio in batch[data_args.audio_column_name]] A__ = feature_extractor(_A , sampling_rate=feature_extractor.sampling_rate ) A__ = {model_input_name: inputs.get(_A )} A__ = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. A__ = raw_datasets["train"].features[data_args.label_column_name].names A__ = {}, {} for i, label in enumerate(_A ): A__ = str(_A ) A__ = label # Load the accuracy metric from the datasets package A__ = evaluate.load("accuracy" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. def compute_metrics(_A : Tuple ): A__ = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=_A , references=eval_pred.label_ids ) A__ = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(_A ) , labelaid=_A , idalabel=_A , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) A__ = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: A__ = ( raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(_A , output_all_columns=_A ) if training_args.do_eval: if data_args.max_eval_samples is not None: A__ = ( raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(_A , output_all_columns=_A ) # Initialize our trainer A__ = Trainer( model=_A , args=_A , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_A , tokenizer=_A , ) # Training if training_args.do_train: A__ = None if training_args.resume_from_checkpoint is not None: A__ = training_args.resume_from_checkpoint elif last_checkpoint is not None: A__ = last_checkpoint A__ = trainer.train(resume_from_checkpoint=_A ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) 
trainer.save_state() # Evaluation if training_args.do_eval: A__ = trainer.evaluate() trainer.log_metrics("eval" , _A ) trainer.save_metrics("eval" , _A ) # Write model card and (optionally) push to hub A__ = { "finetuned_from": model_args.model_name_or_path, "tasks": "audio-classification", "dataset": data_args.dataset_name, "tags": ["audio-classification"], } if training_args.push_to_hub: trainer.push_to_hub(**_A ) else: trainer.create_model_card(**_A ) if __name__ == "__main__": main()
491
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[str]: """simple docstring""" # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file __UpperCAmelCase : Optional[Any] = TapasConfig.from_json_file(UpperCamelCase ) # set absolute/relative position embeddings parameter __UpperCAmelCase : Optional[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": __UpperCAmelCase : List[str] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WTQ": # run_task_main.py hparams __UpperCAmelCase : Tuple = 4 __UpperCAmelCase : Any = True # hparam_utils.py hparams __UpperCAmelCase : Union[str, Any] = 0.664694 __UpperCAmelCase : Union[str, Any] = 0.207951 __UpperCAmelCase : int = 0.121194 __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : List[str] = True __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : List[str] = 0.0352513 __UpperCAmelCase : Optional[int] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams __UpperCAmelCase : int = 4 __UpperCAmelCase : Optional[int] = False # hparam_utils.py hparams __UpperCAmelCase : int = 36.4519 __UpperCAmelCase : str = 0.903421 __UpperCAmelCase : Dict = 222.088 __UpperCAmelCase : Dict = True __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Tuple = True __UpperCAmelCase : Any = 0.763141 __UpperCAmelCase : Optional[Any] = TapasForQuestionAnswering(config=UpperCamelCase ) elif task == "TABFACT": __UpperCAmelCase : Union[str, Any] = TapasForSequenceClassification(config=UpperCamelCase ) elif task == "MLM": __UpperCAmelCase : Tuple = TapasForMaskedLM(config=UpperCamelCase ) elif task == "INTERMEDIATE_PRETRAINING": __UpperCAmelCase : List[str] = TapasModel(config=UpperCamelCase ) else: raise ValueError(f"Task {task} not supported." ) print(f"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Save pytorch-model (weights and configuration) print(f"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(UpperCamelCase ) # Save tokenizer files print(f"Save tokenizer files to {pytorch_dump_path}" ) __UpperCAmelCase : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt" , model_max_length=512 ) tokenizer.save_pretrained(UpperCamelCase ) print("Used relative position embeddings:" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. 
Defaults to False.""",
    )
    parser.add_argument(
        """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint."""
    )
    parser.add_argument(
        """--tapas_config_file""",
        default=None,
        type=str,
        required=True,
        help=(
            """The config json file corresponding to the pre-trained TAPAS model. \n"""
            """This specifies the model architecture."""
        ),
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    A = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
77
0
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Optional[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Union[str, Any] = { """sayakpaul/vit-msn-base""": """https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json""", # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn } class UpperCAmelCase_ ( __lowerCamelCase ): __lowerCamelCase = 'vit_msn' def __init__( self , _lowerCAmelCase=768 , _lowerCAmelCase=12 , _lowerCAmelCase=12 , _lowerCAmelCase=3072 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0 , _lowerCAmelCase=0.0_2 , _lowerCAmelCase=1e-06 , _lowerCAmelCase=224 , _lowerCAmelCase=16 , _lowerCAmelCase=3 , _lowerCAmelCase=True , **_lowerCAmelCase , ): super().__init__(**UpperCamelCase_ ) UpperCAmelCase__ : Union[str, Any] = hidden_size UpperCAmelCase__ : str = num_hidden_layers UpperCAmelCase__ : List[str] = num_attention_heads UpperCAmelCase__ : int = intermediate_size UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob UpperCAmelCase__ : int = initializer_range UpperCAmelCase__ : Dict = layer_norm_eps UpperCAmelCase__ : Optional[int] = image_size UpperCAmelCase__ : Optional[Any] = patch_size UpperCAmelCase__ : Any = num_channels UpperCAmelCase__ : List[Any] = qkv_bias
79
"""simple docstring""" from typing import Union import fire import torch from tqdm import tqdm def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = "cpu" , UpperCamelCase = None ) -> None: """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch.load(UpperCamelCase , map_location=UpperCamelCase ) for k, v in tqdm(state_dict.items() ): if not isinstance(UpperCamelCase , torch.Tensor ): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" ) __UpperCAmelCase : Optional[Any] = v.half() if save_path is None: # overwrite src_path __UpperCAmelCase : str = src_path torch.save(UpperCamelCase , UpperCamelCase ) if __name__ == "__main__": fire.Fire(convert)
77
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = {"vocab_file": "spiece.model"} SCREAMING_SNAKE_CASE__ = { "vocab_file": { "bert_for_seq_generation": ( "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model" ), } } SCREAMING_SNAKE_CASE__ = {"bert_for_seq_generation": 512} class _UpperCAmelCase ( lowercase ): lowerCamelCase_ : Optional[Any] = VOCAB_FILES_NAMES lowerCamelCase_ : int = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase_ : str = [] lowerCamelCase_ : int = ["""input_ids""", """attention_mask"""] def __init__( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : Optional[Any]="</s>" , UpperCAmelCase : Optional[int]="<unk>" , UpperCAmelCase : int="<pad>" , UpperCAmelCase : List[Any]="<::::>" , UpperCAmelCase : Optional[Dict[str, Any]] = None , **UpperCAmelCase : List[Any] , ): SCREAMING_SNAKE_CASE_ :List[str] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) SCREAMING_SNAKE_CASE_ :Dict = vocab_file SCREAMING_SNAKE_CASE_ :Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(UpperCamelCase_) @property def _snake_case ( self : List[str]): return self.sp_model.get_piece_size() def _snake_case ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_ :int = {self.convert_ids_to_tokens(UpperCamelCase_): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : int): SCREAMING_SNAKE_CASE_ :Optional[int] = self.__dict__.copy() SCREAMING_SNAKE_CASE_ :List[Any] = None return state def __setstate__( self : Optional[Any] , UpperCAmelCase : Optional[int]): SCREAMING_SNAKE_CASE_ :Optional[Any] = d # for backward compatibility if not hasattr(self , "sp_model_kwargs"): SCREAMING_SNAKE_CASE_ :List[Any] = {} SCREAMING_SNAKE_CASE_ :Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _snake_case ( self : Any , UpperCAmelCase : str): return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_) def _snake_case ( self : Optional[Any] , UpperCAmelCase : Optional[int]): return self.sp_model.piece_to_id(UpperCamelCase_) def _snake_case ( self : Tuple , UpperCAmelCase : int): SCREAMING_SNAKE_CASE_ :int = self.sp_model.IdToPiece(UpperCamelCase_) return token def _snake_case ( self : Dict , UpperCAmelCase : Optional[Any]): SCREAMING_SNAKE_CASE_ :int = [] SCREAMING_SNAKE_CASE_ :Tuple = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(UpperCamelCase_) + token SCREAMING_SNAKE_CASE_ :List[Any] = [] else: current_sub_tokens.append(UpperCamelCase_) out_string += self.sp_model.decode(UpperCamelCase_) return out_string.strip() def _snake_case ( self : List[str] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None): if not os.path.isdir(UpperCamelCase_): logger.error(F"Vocabulary path ({save_directory}) should be a 
directory") return SCREAMING_SNAKE_CASE_ :Tuple = os.path.join( UpperCamelCase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]) if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCamelCase_) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file , UpperCamelCase_) elif not os.path.isfile(self.vocab_file): with open(UpperCamelCase_ , "wb") as fi: SCREAMING_SNAKE_CASE_ :List[str] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_) return (out_vocab_file,)
631
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": A = pd.read_csv("""sample_data.csv""", header=None) A = df.shape[:1][0] # If you're using some other dataset input the target column A = df.iloc[:, 1:2] A = actual_data.values.reshape(len_data, 1) A = MinMaxScaler().fit_transform(actual_data) A = 10 A = 5 A = 20 A = len_data - periods * look_back A = actual_data[:division] A = actual_data[division - look_back :] A , A = [], [] A , A = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) A = np.array(train_x) A = np.array(test_x) A = np.array([list(i.ravel()) for i in train_y]) A = np.array([list(i.ravel()) for i in test_y]) A = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss="""mean_squared_error""", optimizer="""adam""") A = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) A = model.predict(x_test)
77
0
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class a_ : @staticmethod def A_( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> List[Any]: """simple docstring""" pass @is_pipeline_test @require_vision class a_ ( unittest.TestCase ): @require_torch def A_( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , ) SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE_ = image_classifier(UpperCamelCase_ , candidate_labels=['a', 'b', 'c'] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(UpperCamelCase_ ) , [ [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}], [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'c'}, {'score': 0.3_3_3, 'label': 'b'}], ] , ) SCREAMING_SNAKE_CASE_ = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], ] , ) @require_tf def A_( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = pipeline( model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' ) SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE_ = image_classifier(UpperCamelCase_ , candidate_labels=['a', 'b', 'c'] ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [{'score': 0.3_3_3, 'label': 'a'}, {'score': 0.3_3_3, 'label': 'b'}, {'score': 0.3_3_3, 'label': 'c'}] , ) SCREAMING_SNAKE_CASE_ = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 
0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], [ {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, {'score': 0.3_3_3, 'label': ANY(UpperCamelCase_ )}, ], ] , ) @slow @require_torch def A_( self ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE_ = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE_ = image_classifier(UpperCamelCase_ , candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ] , ) SCREAMING_SNAKE_CASE_ = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ], ] * 5 , ) @slow @require_tf def A_( self ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ = pipeline( task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' ) # This is an image of 2 cats with remotes and no planes SCREAMING_SNAKE_CASE_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) SCREAMING_SNAKE_CASE_ = image_classifier(UpperCamelCase_ , candidate_labels=['cat', 'plane', 'remote'] ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ] , ) SCREAMING_SNAKE_CASE_ = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 ) self.assertEqual( nested_simplify(UpperCamelCase_ ) , [ [ {'score': 0.5_1_1, 'label': 'remote'}, {'score': 0.4_8_5, 'label': 'cat'}, {'score': 0.0_0_4, 'label': 'plane'}, ], ] * 5 , )
205
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin A = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right A = 250_004 A = 250_020 @require_sentencepiece @require_tokenizers class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = MBartTokenizer lowercase_ = MBartTokenizerFast lowercase_ = True lowercase_ = True def a_ ( self : str): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : Any = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Dict = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("This is a test") self.assertListEqual(UpperCamelCase_ , ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase_) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) __UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) __UpperCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_) self.assertListEqual( UpperCamelCase_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def a_ ( self : Dict): """simple docstring""" if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return __UpperCAmelCase : Dict = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"): __UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_) __UpperCAmelCase : int = tempfile.mkdtemp() __UpperCAmelCase : Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_) __UpperCAmelCase : Any = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) __UpperCAmelCase : Any = 
tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f) self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=True __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() __UpperCAmelCase : Dict = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : int = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it save with the same files self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_) # Checks everything loads correctly in the same way __UpperCAmelCase : int = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) # Save tokenizer rust, legacy_format=False __UpperCAmelCase : Tuple = tempfile.mkdtemp() __UpperCAmelCase : int = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_) __UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCamelCase_) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way __UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(UpperCamelCase_) __UpperCAmelCase : str = tokenizer_p.from_pretrained(UpperCamelCase_) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_)) shutil.rmtree(UpperCamelCase_) @require_torch @require_sentencepiece @require_tokenizers class a__ ( unittest.TestCase ): lowercase_ = "facebook/mbart-large-en-ro" lowercase_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] lowercase_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] lowercase_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls : int): """simple docstring""" __UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO") __UpperCAmelCase : Union[str, Any] = 1 return cls def a_ ( self : List[Any]): """simple docstring""" 
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) def a_ ( self : Optional[int]): """simple docstring""" self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids) __UpperCAmelCase : Union[str, Any] = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] __UpperCAmelCase : Optional[Any] = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_) __UpperCAmelCase : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_) self.assertEqual(UpperCamelCase_ , UpperCamelCase_) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_) def a_ ( self : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = ["this is gunna be a long sentence " * 20] assert isinstance(src_text[0] , UpperCamelCase_) __UpperCAmelCase : Tuple = 10 __UpperCAmelCase : List[Any] = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_).input_ids[0] self.assertEqual(ids[-2] , 2) self.assertEqual(ids[-1] , UpperCamelCase_) self.assertEqual(len(UpperCamelCase_) , UpperCamelCase_) def a_ ( self : Any): """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001]) def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : List[str] = tempfile.mkdtemp() __UpperCAmelCase : Union[str, Any] = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCamelCase_) __UpperCAmelCase : List[Any] = MBartTokenizer.from_pretrained(UpperCamelCase_) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_) @require_torch def a_ ( self : Optional[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors="pt") __UpperCAmelCase : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self : Optional[int]): """simple docstring""" __UpperCAmelCase : Dict = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens) , return_tensors="pt" , ) __UpperCAmelCase : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id) self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_) self.assertEqual((2, 14) , batch.input_ids.shape) self.assertEqual((2, 14) , batch.attention_mask.shape) __UpperCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCamelCase_) self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , []) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : List[str] = 
self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors="pt") __UpperCAmelCase : Any = self.tokenizer( text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors="pt") __UpperCAmelCase : int = targets["input_ids"] __UpperCAmelCase : Any = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1] , 3) self.assertEqual(batch.decoder_input_ids.shape[1] , 10) @require_torch def a_ ( self : int): """simple docstring""" __UpperCAmelCase : int = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR") self.assertEqual( nested_simplify(UpperCamelCase_) , { # A, test, EOS, en_XX "input_ids": [[62, 3034, 2, 250004]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 250001, } , )
77
0
import heapq as hq
import math
from collections.abc import Iterator


class __A:
    '''simple docstring'''

    def __init__( self , __lowerCAmelCase ):
        '''simple docstring'''
        lowerCamelCase__ = str(id_ )
        lowerCamelCase__ = None
        lowerCamelCase__ = None
        lowerCamelCase__ = []
        lowerCamelCase__ = {}  # {vertex:distance}

    def __lt__( self , __lowerCAmelCase ):
        '''simple docstring'''
        return self.key < other.key

    def __repr__( self ):
        '''simple docstring'''
        return self.id

    def __lowerCamelCase ( self , __lowerCAmelCase ):
        '''simple docstring'''
        self.neighbors.append(UpperCamelCase_ )

    def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
        '''simple docstring'''
        lowerCamelCase__ = weight


def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ,__snake_case ) -> Union[str, Any]:
    '''simple docstring'''
    graph[a - 1].add_neighbor(graph[b - 1] )
    graph[b - 1].add_neighbor(graph[a - 1] )
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1] ,__snake_case )
    graph[b - 1].add_edge(graph[a - 1] ,__snake_case )


def lowerCAmelCase__(__snake_case ,__snake_case ) -> list:
    '''simple docstring'''
    lowerCamelCase__ = []
    for u in graph:
        lowerCamelCase__ = math.inf
        lowerCamelCase__ = None
    lowerCamelCase__ = 0
    lowerCamelCase__ = graph[:]
    while q:
        lowerCamelCase__ = min(__snake_case )
        q.remove(__snake_case )
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                lowerCamelCase__ = u
                lowerCamelCase__ = u.edges[v.id]
    for i in range(1 ,len(__snake_case ) ):
        a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
    return a


def lowerCAmelCase__(__snake_case ,__snake_case ) -> Iterator[tuple]:
    '''simple docstring'''
    for u in graph:
        lowerCamelCase__ = math.inf
        lowerCamelCase__ = None
    lowerCamelCase__ = 0
    lowerCamelCase__ = list(__snake_case )
    hq.heapify(__snake_case )
    while h:
        lowerCamelCase__ = hq.heappop(__snake_case )
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                lowerCamelCase__ = u
                lowerCamelCase__ = u.edges[v.id]
                hq.heapify(__snake_case )
    for i in range(1 ,len(__snake_case ) ):
        yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)


def lowerCAmelCase__() -> None:
    '''simple docstring'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()
481
"""simple docstring""" from typing import Any class a__ : def __init__( self : List[str] , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : str = data __UpperCAmelCase : Optional[Any] = None class a__ : def __init__( self : Any): """simple docstring""" __UpperCAmelCase : Optional[int] = None def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.head while temp is not None: print(temp.data , end=" ") __UpperCAmelCase : Tuple = temp.next print() def a_ ( self : int , UpperCamelCase_ : Any): """simple docstring""" __UpperCAmelCase : List[str] = Node(UpperCamelCase_) __UpperCAmelCase : str = self.head __UpperCAmelCase : Optional[int] = new_node def a_ ( self : str , UpperCamelCase_ : List[Any] , UpperCamelCase_ : str): """simple docstring""" if node_data_a == node_data_a: return else: __UpperCAmelCase : int = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Tuple = node_a.next __UpperCAmelCase : List[Any] = self.head while node_a is not None and node_a.data != node_data_a: __UpperCAmelCase : Optional[Any] = node_a.next if node_a is None or node_a is None: return __UpperCAmelCase , __UpperCAmelCase : Any = node_a.data, node_a.data if __name__ == "__main__": A = LinkedList() for i in range(5, 0, -1): ll.push(i) ll.print_list() ll.swap_nodes(1, 4) print("""After swapping""") ll.print_list()
77
0
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


SCREAMING_SNAKE_CASE : Optional[int] = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE : Any = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
635
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore A = """ Human: <<task>> Assistant: """ A = """huggingface-tools/default-prompts""" A = {"""chat""": """chat_prompt_template.txt""", """run""": """run_prompt_template.txt"""} def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase="run" ) -> List[str]: """simple docstring""" if prompt_or_repo_id is None: __UpperCAmelCase : Optional[int] = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s" , UpperCamelCase ) is not None: return prompt_or_repo_id __UpperCAmelCase : str = cached_file( UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} ) with open(UpperCamelCase , "r" , encoding="utf-8" ) as f: return f.read()
77
0
import numpy as np
from PIL import Image


def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->np.ndarray:
    _UpperCAmelCase =np.array(_lowerCamelCase )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    _UpperCAmelCase =0
    _UpperCAmelCase =0
    _UpperCAmelCase =0
    _UpperCAmelCase =0

    # compute the shape of the output matrix
    _UpperCAmelCase =(arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    _UpperCAmelCase =np.zeros((maxpool_shape, maxpool_shape) )

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            _UpperCAmelCase =np.max(arr[i : i + size, j : j + size] )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        _UpperCAmelCase =0
        _UpperCAmelCase =0

    return updated_arr


def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) ->np.ndarray:
    _UpperCAmelCase =np.array(_lowerCamelCase )
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix" )
    _UpperCAmelCase =0
    _UpperCAmelCase =0
    _UpperCAmelCase =0
    _UpperCAmelCase =0

    # compute the shape of the output matrix
    _UpperCAmelCase =(arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    _UpperCAmelCase =np.zeros((avgpool_shape, avgpool_shape) )

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            _UpperCAmelCase =int(np.average(arr[i : i + size, j : j + size] ) )
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        _UpperCAmelCase =0
        _UpperCAmelCase =0

    return updated_arr


# Main Function
if __name__ == "__main__":
    from doctest import testmod

    testmod(name='avgpooling', verbose=True)

    # Loading the image
    snake_case__ : List[str] = Image.open('path_to_image')

    # Converting the image to numpy array and maxpooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()

    # Converting the image to numpy array and averagepooling, displaying the result
    # Ensure that the image is a square matrix
    Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
408
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available A = { """configuration_ernie""": ["""ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ErnieConfig""", """ErnieOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ErnieForCausalLM""", """ErnieForMaskedLM""", """ErnieForMultipleChoice""", """ErnieForNextSentencePrediction""", """ErnieForPreTraining""", """ErnieForQuestionAnswering""", """ErnieForSequenceClassification""", """ErnieForTokenClassification""", """ErnieModel""", """ErniePreTrainedModel""", ] if TYPE_CHECKING: from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_ernie import ( ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST, ErnieForCausalLM, ErnieForMaskedLM, ErnieForMultipleChoice, ErnieForNextSentencePrediction, ErnieForPreTraining, ErnieForQuestionAnswering, ErnieForSequenceClassification, ErnieForTokenClassification, ErnieModel, ErniePreTrainedModel, ) else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


A_ : str = 'sshleifer/bart-tiny-random'
A_ : Dict = 'patrickvonplaten/t5-tiny-random'


@require_torch
class _lowerCAmelCase( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def _a ( self ):
        return AutoConfig.from_pretrained(UpperCamelCase_ )

    def _a ( self ):
        UpperCamelCase_: Any = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.num_hidden_layers , 1 )

    def _a ( self ):
        UpperCamelCase_: str = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase_ )

    def _a ( self ):
        UpperCamelCase_: List[Any] = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase_ )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )

    def _a ( self ):
        UpperCamelCase_: int = create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=1 , d=1 )
        self.assertEqual(student.config.encoder_layers , 1 )
        self.assertEqual(student.config.decoder_layers , 1 )

    def _a ( self ):
        with self.assertRaises(UpperCamelCase_ ):
            create_student_by_copying_alternating_layers(UpperCamelCase_ , tempfile.mkdtemp() , e=UpperCamelCase_ , d=UpperCamelCase_ )
57
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class a__ ( nn.Module ): def __init__( self : Union[str, Any]): """simple docstring""" super().__init__() __UpperCAmelCase : Optional[int] = nn.Linear(3 , 4) __UpperCAmelCase : str = nn.BatchNormad(4) __UpperCAmelCase : int = nn.Linear(4 , 5) def a_ ( self : str , UpperCamelCase_ : List[str]): """simple docstring""" return self.lineara(self.batchnorm(self.lineara(UpperCamelCase_))) class a__ ( unittest.TestCase ): def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Optional[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , model.state_dict()) __UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase_ , "index.json") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: __UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase_ , F"{key}.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) # TODO: add tests on the fact weights are properly loaded def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : int = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: __UpperCAmelCase : List[Any] = torch.randn(2 , 3 , dtype=UpperCamelCase_) with TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Tuple = offload_weight(UpperCamelCase_ , "weight" , UpperCamelCase_ , {}) __UpperCAmelCase : Dict = os.path.join(UpperCamelCase_ , "weight.dat") self.assertTrue(os.path.isfile(UpperCamelCase_)) self.assertDictEqual(UpperCamelCase_ , {"weight": {"shape": [2, 3], "dtype": str(UpperCamelCase_).split(".")[1]}}) __UpperCAmelCase : Optional[Any] = load_offloaded_weight(UpperCamelCase_ , index["weight"]) self.assertTrue(torch.equal(UpperCamelCase_ , UpperCamelCase_)) def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : List[Any] = ModelForTest() __UpperCAmelCase : Optional[int] = model.state_dict() __UpperCAmelCase : List[str] = {k: v for k, v in state_dict.items() if "linear2" not in k} __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : List[str] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) __UpperCAmelCase : Optional[int] = {k: v for k, v in state_dict.items() if "weight" in k} __UpperCAmelCase : Optional[Any] = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Optional[Any] = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) with TemporaryDirectory() as tmp_dir: offload_state_dict(UpperCamelCase_ , UpperCamelCase_) # Duplicates are removed 
__UpperCAmelCase : str = OffloadedWeightsLoader(state_dict=UpperCamelCase_ , save_folder=UpperCamelCase_) # Every key is there with the right value self.assertEqual(sorted(UpperCamelCase_) , sorted(state_dict.keys())) for key, param in state_dict.items(): self.assertTrue(torch.allclose(UpperCamelCase_ , weight_map[key])) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Any = {"a.1": 0, "a.10": 1, "a.2": 2} __UpperCAmelCase : Union[str, Any] = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1": 0, "a.2": 2}) __UpperCAmelCase : int = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} __UpperCAmelCase : int = extract_submodules_state_dict(UpperCamelCase_ , ["a.1", "a.2"]) self.assertDictEqual(UpperCamelCase_ , {"a.1.a": 0, "a.2.a": 2})
77
0
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / "utils")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 SCREAMING_SNAKE_CASE__ = get_tests_dir("fixtures") class snake_case (unittest.TestCase ): def _a ( self ) -> str: lowercase__ = mock.Mock() lowercase__ = 500 lowercase__ = {} lowercase__ = HTTPError lowercase__ = {} # Download this model to make sure it's in the cache. lowercase__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.Session.request" ,return_value=UpperCamelCase_ ) as mock_head: lowercase__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" ) # This check we did call the fake head request mock_head.assert_called() def _a ( self ) -> int: lowercase__ = ViTImageProcessor.from_pretrained( "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" ) def _a ( self ) -> str: with self.assertRaises(UpperCamelCase_ ): # config is in subfolder, the following should not work without specifying the subfolder lowercase__ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" ) lowercase__ = AutoImageProcessor.from_pretrained( "hf-internal-testing/stable-diffusion-all-variants" ,subfolder="feature_extractor" ) self.assertIsNotNone(UpperCamelCase_ ) @is_staging_test class snake_case (unittest.TestCase ): @classmethod def _a ( cls ) -> Optional[Any]: lowercase__ = TOKEN HfFolder.save_token(UpperCamelCase_ ) @classmethod def _a ( cls ) -> Union[str, Any]: try: delete_repo(token=cls._token ,repo_id="test-image-processor" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="valid_org/test-image-processor-org" ) except HTTPError: pass try: delete_repo(token=cls._token ,repo_id="test-dynamic-image-processor" ) except HTTPError: pass def _a ( self ) -> List[str]: lowercase__ = ViTImageProcessor.from_pretrained(UpperCamelCase_ ) image_processor.push_to_hub("test-image-processor" ,use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(UpperCamelCase_ ,getattr(UpperCamelCase_ ,UpperCamelCase_ ) ) # Reset repo delete_repo(token=self._token ,repo_id="test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( UpperCamelCase_ ,repo_id="test-image-processor" ,push_to_hub=UpperCamelCase_ ,use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained(F'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(UpperCamelCase_ ,getattr(UpperCamelCase_ ,UpperCamelCase_ ) ) def _a ( self ) -> Union[str, Any]: lowercase__ = ViTImageProcessor.from_pretrained(UpperCamelCase_ ) image_processor.push_to_hub("valid_org/test-image-processor" ,use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(UpperCamelCase_ 
,getattr(UpperCamelCase_ ,UpperCamelCase_ ) ) # Reset repo delete_repo(token=self._token ,repo_id="valid_org/test-image-processor" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( UpperCamelCase_ ,repo_id="valid_org/test-image-processor-org" ,push_to_hub=UpperCamelCase_ ,use_auth_token=self._token ) lowercase__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" ) for k, v in image_processor.__dict__.items(): self.assertEqual(UpperCamelCase_ ,getattr(UpperCamelCase_ ,UpperCamelCase_ ) ) def _a ( self ) -> int: CustomImageProcessor.register_for_auto_class() lowercase__ = CustomImageProcessor.from_pretrained(UpperCamelCase_ ) image_processor.push_to_hub("test-dynamic-image-processor" ,use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map ,{"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} ,) lowercase__ = AutoImageProcessor.from_pretrained( F'''{USER}/test-dynamic-image-processor''' ,trust_remote_code=UpperCamelCase_ ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ ,"CustomImageProcessor" )
267
"""simple docstring""" def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> int: """simple docstring""" __UpperCAmelCase : Dict = 1 # To kept the Calculated Value # Since C(n, k) = C(n, n-k) if k > (n - k): __UpperCAmelCase : Union[str, Any] = n - k # Calculate C(n,k) for i in range(UpperCamelCase ): result *= n - i result //= i + 1 return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return binomial_coefficient(2 * node_count , UpperCamelCase ) // (node_count + 1) def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" if n < 0: raise ValueError("factorial() not defined for negative values" ) __UpperCAmelCase : Optional[Any] = 1 for i in range(1 , n + 1 ): result *= i return result def _UpperCamelCase ( UpperCamelCase ) -> int: """simple docstring""" return catalan_number(UpperCamelCase ) * factorial(UpperCamelCase ) if __name__ == "__main__": A = int(input("""Enter the number of nodes: """).strip() or 0) if node_count <= 0: raise ValueError("""We need some nodes to work with.""") print( f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} ''' f'''binary trees and {catalan_number(node_count)} binary search trees.''' )
77
0
"""simple docstring""" import sys def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Tuple: _lowerCamelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE_ ) _lowerCamelCase : str = [[0 for x in range(SCREAMING_SNAKE_CASE_ )] for x in range(SCREAMING_SNAKE_CASE_ )] _lowerCamelCase : Tuple = [[0 for x in range(SCREAMING_SNAKE_CASE_ )] for x in range(SCREAMING_SNAKE_CASE_ )] for chain_length in range(2 , SCREAMING_SNAKE_CASE_ ): for a in range(1 , n - chain_length + 1 ): _lowerCamelCase : Optional[Any] = a + chain_length - 1 _lowerCamelCase : List[str] = sys.maxsize for c in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): _lowerCamelCase : Optional[int] = ( matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b] ) if cost < matrix[a][b]: _lowerCamelCase : Any = cost _lowerCamelCase : List[Any] = c return matrix, sol def UpperCamelCase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]: if i == j: print('''A''' + str(SCREAMING_SNAKE_CASE_ ) , end=''' ''' ) else: print('''(''' , end=''' ''' ) print_optiomal_solution(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , optimal_solution[i][j] ) print_optiomal_solution(SCREAMING_SNAKE_CASE_ , optimal_solution[i][j] + 1 , SCREAMING_SNAKE_CASE_ ) print(''')''' , end=''' ''' ) def UpperCamelCase ( ) ->List[str]: _lowerCamelCase : List[str] = [30, 35, 15, 5, 10, 20, 25] _lowerCamelCase : Any = len(SCREAMING_SNAKE_CASE_ ) # Size of matrix created from above array will be # 30*35 35*15 15*5 5*10 10*20 20*25 _lowerCamelCase : Optional[Any] = matrix_chain_order(SCREAMING_SNAKE_CASE_ ) print('''No. of Operation required: ''' + str(matrix[1][n - 1] ) ) print_optiomal_solution(SCREAMING_SNAKE_CASE_ , 1 , n - 1 ) if __name__ == "__main__": main()
434
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) A = { """configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""], """processing_trocr""": ["""TrOCRProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A = [ """TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrOCRForCausalLM""", """TrOCRPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
77
0
"""simple docstring""" import unittest import torch from torch import nn from diffusers.models.activations import get_activation class lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[int] = get_activation("""swish""" ) self.assertIsInstance(UpperCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Tuple = get_activation("""silu""" ) self.assertIsInstance(UpperCamelCase_ , nn.SiLU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = get_activation("""mish""" ) self.assertIsInstance(UpperCamelCase_ , nn.Mish ) self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 ) def _a (self ): """simple docstring""" UpperCAmelCase__ : Union[str, Any] = get_activation("""gelu""" ) self.assertIsInstance(UpperCamelCase_ , nn.GELU ) self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 ) self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 ) self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
182
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class a__ ( unittest.TestCase ): def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ): """simple docstring""" __UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18} __UpperCAmelCase : List[Any] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[Any] = image_size __UpperCAmelCase : str = min_resolution __UpperCAmelCase : Tuple = max_resolution __UpperCAmelCase : Optional[Any] = do_resize __UpperCAmelCase : Any = size __UpperCAmelCase : Any = do_normalize __UpperCAmelCase : Any = image_mean __UpperCAmelCase : Optional[Any] = image_std def a_ ( self : str): """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ViTImageProcessor if is_vision_available() else None def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self) @property def a_ ( self : Union[str, Any]): """simple docstring""" return self.image_proc_tester.prepare_image_processor_dict() def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase_ , "image_mean")) self.assertTrue(hasattr(UpperCamelCase_ , "image_std")) self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize")) self.assertTrue(hasattr(UpperCamelCase_ , "do_resize")) self.assertTrue(hasattr(UpperCamelCase_ , "size")) def a_ ( self : Dict): """simple docstring""" pass def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict) # create random PIL images __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : int = 
self.image_processing_class(**self.image_processor_dict) # create random numpy tensors __UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray) # Test not batched input __UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) def a_ ( self : Any): """simple docstring""" __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors __UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["height"], self.image_proc_tester.size["width"], ) , )
77
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCAmelCase_ : Tuple = logging.get_logger(__name__)

UpperCAmelCase_ : Optional[Any] = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class UpperCamelCase ( _UpperCAmelCase ):
    lowerCAmelCase : Union[str, Any] = """vivit"""

    def __init__(
        self ,
        UpperCAmelCase__=224 ,
        UpperCAmelCase__=32 ,
        UpperCAmelCase__=[2, 16, 16] ,
        UpperCAmelCase__=3 ,
        UpperCAmelCase__=768 ,
        UpperCAmelCase__=12 ,
        UpperCAmelCase__=12 ,
        UpperCAmelCase__=3_072 ,
        UpperCAmelCase__="gelu_fast" ,
        UpperCAmelCase__=0.0 ,
        UpperCAmelCase__=0.0 ,
        UpperCAmelCase__=0.02 ,
        UpperCAmelCase__=1e-0_6 ,
        UpperCAmelCase__=True ,
        **UpperCAmelCase__ ,
    ):
        A__ = hidden_size
        A__ = num_hidden_layers
        A__ = num_attention_heads
        A__ = intermediate_size
        A__ = hidden_act
        A__ = hidden_dropout_prob
        A__ = attention_probs_dropout_prob
        A__ = initializer_range
        A__ = layer_norm_eps

        A__ = image_size
        A__ = num_frames
        A__ = tubelet_size
        A__ = num_channels
        A__ = qkv_bias

        super().__init__(**UpperCamelCase_ )
491
"""simple docstring""" from collections import namedtuple A = namedtuple("""from_to""", """from_ to""") A = { """cubicmeter""": from_to(1, 1), """litre""": from_to(0.001, 1_000), """kilolitre""": from_to(1, 1), """gallon""": from_to(0.00454, 264.172), """cubicyard""": from_to(0.76455, 1.30795), """cubicfoot""": from_to(0.028, 35.3147), """cup""": from_to(0.000236588, 4226.75), } def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float: """simple docstring""" if from_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(UpperCamelCase ) ) if to_type not in METRIC_CONVERSION: raise ValueError( f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(UpperCamelCase ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
77
0
import comet # From: unbabel-comet import torch import datasets SCREAMING_SNAKE_CASE__ : Optional[int] = datasets.logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Tuple = """\ @inproceedings{rei-EtAl:2020:WMT, author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon}, title = {Unbabel's Participation in the WMT20 Metrics Shared Task}, booktitle = {Proceedings of the Fifth Conference on Machine Translation}, month = {November}, year = {2020}, address = {Online}, publisher = {Association for Computational Linguistics}, pages = {909--918}, } @inproceedings{rei-etal-2020-comet, title = \"{COMET}: A Neural Framework for {MT} Evaluation\", author = \"Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon\", booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\", month = nov, year = \"2020\", address = \"Online\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\", pages = \"2685--2702\", } """ SCREAMING_SNAKE_CASE__ : Optional[Any] = """\ Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM). With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition. See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information. """ SCREAMING_SNAKE_CASE__ : List[str] = """ COMET score. Args: `sources` (list of str): Source sentences `predictions` (list of str): candidate translations `references` (list of str): reference translations `cuda` (bool): If set to True, runs COMET using GPU `show_progress` (bool): Shows progress `model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None. Returns: `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`. `scores`: List of scores. 
Examples: >>> comet_metric = datasets.load_metric('comet') >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"] >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"] >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"] >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source) >>> print([round(v, 2) for v in results[\"scores\"]]) [0.19, 0.92] """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def __UpperCAmelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """sources""": datasets.Value("""string""" , id="""sequence""" ), """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[ """https://github.com/Unbabel/COMET""", """https://www.aclweb.org/anthology/2020.emnlp-main.213/""", """http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""", ] , ) def __UpperCAmelCase ( self , _lowerCAmelCase ): if self.config_name == "default": UpperCAmelCase__ : str = comet.load_from_checkpoint(comet.download_model("""wmt20-comet-da""" ) ) else: UpperCAmelCase__ : Dict = comet.load_from_checkpoint(comet.download_model(self.config_name ) ) def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=False ): if gpus is None: UpperCAmelCase__ : Optional[Any] = 1 if torch.cuda.is_available() else 0 UpperCAmelCase__ : List[str] = {"src": sources, "mt": predictions, "ref": references} UpperCAmelCase__ : str = [dict(zip(UpperCamelCase_ , UpperCamelCase_ ) ) for t in zip(*data.values() )] UpperCAmelCase__ : Optional[int] = self.scorer.predict(UpperCamelCase_ , gpus=UpperCamelCase_ , progress_bar=UpperCamelCase_ ) return {"mean_score": mean_score, "scores": scores}
79
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class a__ ( __magic_name__ , unittest.TestCase ): lowercase_ = ShapEPipeline lowercase_ = ["prompt"] lowercase_ = ["prompt"] lowercase_ = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] lowercase_ = False @property def a_ ( self : Optional[int]): """simple docstring""" return 32 @property def a_ ( self : Any): """simple docstring""" return 32 @property def a_ ( self : int): """simple docstring""" return self.time_input_dim * 4 @property def a_ ( self : List[Any]): """simple docstring""" return 8 @property def a_ ( self : List[Any]): """simple docstring""" __UpperCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def a_ ( self : List[str]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModelWithProjection(UpperCamelCase_) @property def a_ ( self : Any): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Union[str, Any] = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } __UpperCAmelCase : Dict = PriorTransformer(**UpperCamelCase_) return model @property def a_ ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) __UpperCAmelCase : Tuple = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } __UpperCAmelCase : List[Any] = ShapERenderer(**UpperCamelCase_) return model def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.dummy_prior __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : int = self.dummy_renderer __UpperCAmelCase : Tuple = HeunDiscreteScheduler( beta_schedule="exp" , num_train_timesteps=1024 , prediction_type="sample" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , ) __UpperCAmelCase : str = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "renderer": renderer, "scheduler": scheduler, } return components def a_ ( self : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Any=0): """simple docstring""" if str(UpperCamelCase_).startswith("mps"): __UpperCAmelCase : List[Any] = 
torch.manual_seed(UpperCamelCase_) else: __UpperCAmelCase : str = torch.Generator(device=UpperCamelCase_).manual_seed(UpperCamelCase_) __UpperCAmelCase : List[Any] = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "np", } return inputs def a_ ( self : Tuple): """simple docstring""" __UpperCAmelCase : str = "cpu" __UpperCAmelCase : Union[str, Any] = self.get_dummy_components() __UpperCAmelCase : Union[str, Any] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_)) __UpperCAmelCase : Union[str, Any] = output.images[0] __UpperCAmelCase : str = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __UpperCAmelCase : Union[str, Any] = np.array( [ 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, 0.00039216, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self : Tuple): """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Union[str, Any] = torch_device == "cpu" __UpperCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , ) def a_ ( self : Union[str, Any]): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : List[str] = self.pipeline_class(**UpperCamelCase_) __UpperCAmelCase : int = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Any = 2 __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_) for key in inputs.keys(): if key in self.batch_params: __UpperCAmelCase : List[Any] = batch_size * [inputs[key]] __UpperCAmelCase : List[Any] = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class a__ ( unittest.TestCase ): def a_ ( self : List[str]): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self : List[str]): """simple docstring""" __UpperCAmelCase : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy") __UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e") __UpperCAmelCase : Any = pipe.to(UpperCamelCase_) pipe.set_progress_bar_config(disable=UpperCamelCase_) __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase_).manual_seed(0) __UpperCAmelCase : int = pipe( "a shark" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_)
77
0
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
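# ---------------------------------------------------------------------------
# For orientation, a minimal usage sketch of the pipeline exercised above,
# mirroring the checkpoint, scheduler, and prompt that the slow tests load.
# The fp16 dtype, the "cuda" device, and the output filename are illustrative
# assumptions, not part of the tests.
# ---------------------------------------------------------------------------
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, torch_dtype=torch.float16  # fp16 assumed, to fit GPU memory
)
pipe = pipe.to("cuda")

# The pipeline denoises overlapping latent views and fuses them, which is why
# the slow tests above assert a wide (1, 512, 2048, 3) output for this checkpoint.
image = pipe("a photo of the dolomites", generator=torch.manual_seed(0)).images[0]
image.save("dolomites.png")  # illustrative output path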
631
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np import torch from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging A = logging.get_logger(__name__) class a__ ( __magic_name__ ): lowercase_ = ["input_features", "is_longer"] def __init__( self : List[str] , UpperCamelCase_ : Dict=64 , UpperCamelCase_ : Tuple=48000 , UpperCamelCase_ : List[Any]=480 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=1024 , UpperCamelCase_ : List[str]=0.0 , UpperCamelCase_ : Optional[int]=False , UpperCamelCase_ : float = 0 , UpperCamelCase_ : float = 14000 , UpperCamelCase_ : int = None , UpperCamelCase_ : str = "fusion" , UpperCamelCase_ : str = "repeatpad" , **UpperCamelCase_ : Optional[Any] , ): """simple docstring""" super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __UpperCAmelCase : Union[str, Any] = top_db __UpperCAmelCase : Optional[Any] = truncation __UpperCAmelCase : str = padding __UpperCAmelCase : int = fft_window_size __UpperCAmelCase : str = (fft_window_size >> 1) + 1 __UpperCAmelCase : List[Any] = hop_length __UpperCAmelCase : Optional[Any] = max_length_s __UpperCAmelCase : Tuple = max_length_s * sampling_rate __UpperCAmelCase : str = sampling_rate __UpperCAmelCase : int = frequency_min __UpperCAmelCase : Optional[Any] = frequency_max __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm=UpperCamelCase_ , mel_scale="htk" , ) __UpperCAmelCase : Any = mel_filter_bank( num_frequency_bins=self.nb_frequency_bins , num_mel_filters=UpperCamelCase_ , min_frequency=UpperCamelCase_ , max_frequency=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , norm="slaney" , mel_scale="slaney" , ) def a_ ( self : Dict): """simple docstring""" __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__) __UpperCAmelCase : str = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] if "mel_filters_slaney" in output: del output["mel_filters_slaney"] return output def a_ ( self : int , UpperCamelCase_ : np.array , UpperCamelCase_ : Optional[np.array] = None): """simple docstring""" __UpperCAmelCase : List[Any] = spectrogram( UpperCamelCase_ , window_function(self.fft_window_size , "hann") , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=UpperCamelCase_ , log_mel="dB" , ) return log_mel_spectrogram.T def a_ ( self : List[Any] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : int): """simple docstring""" __UpperCAmelCase : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1)) , 3) if len(ranges[1]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : str = [0] if len(ranges[2]) == 0: # if the audio is too short, we just use the first chunk __UpperCAmelCase : Dict = [0] # randomly choose index for each part __UpperCAmelCase : Dict = np.random.choice(ranges[0]) __UpperCAmelCase : List[str] = np.random.choice(ranges[1]) __UpperCAmelCase : List[Any] = np.random.choice(ranges[2]) __UpperCAmelCase : List[Any] = mel[idx_front : idx_front + 
chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_middle : idx_middle + chunk_frames, :] __UpperCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :] __UpperCAmelCase : Tuple = torch.tensor(mel[None, None, :]) __UpperCAmelCase : Union[str, Any] = torch.nn.functional.interpolate( UpperCamelCase_ , size=[chunk_frames, 64] , mode="bilinear" , align_corners=UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = mel_shrink[0][0].numpy() __UpperCAmelCase : Optional[int] = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0) return mel_fusion def a_ ( self : Optional[Any] , UpperCamelCase_ : np.array , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple , UpperCamelCase_ : Optional[Any]): """simple docstring""" if waveform.shape[0] > max_length: if truncation == "rand_trunc": __UpperCAmelCase : List[str] = True # random crop to max_length (for compatibility) -> this should be handled by self.pad __UpperCAmelCase : List[Any] = len(UpperCamelCase_) - max_length __UpperCAmelCase : int = np.random.randint(0 , overflow + 1) __UpperCAmelCase : Union[str, Any] = waveform[idx : idx + max_length] __UpperCAmelCase : Union[str, Any] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] elif truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : Dict = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed __UpperCAmelCase : Tuple = mel.shape[0] if chunk_frames == total_frames: # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length. # In this case, we just use the whole audio. __UpperCAmelCase : List[str] = np.stack([mel, mel, mel, mel] , axis=0) __UpperCAmelCase : Any = False else: __UpperCAmelCase : List[str] = self._random_mel_fusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_) __UpperCAmelCase : Union[str, Any] = True else: raise NotImplementedError(F"data_truncating {truncation} not implemented") else: __UpperCAmelCase : Optional[Any] = False # only use repeat as a new possible value for padding. 
you repeat the audio before applying the usual max_length padding if waveform.shape[0] < max_length: if padding == "repeat": __UpperCAmelCase : Tuple = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : List[str] = np.stack(np.tile(UpperCamelCase_ , n_repeat + 1))[:max_length] if padding == "repeatpad": __UpperCAmelCase : Union[str, Any] = int(max_length / len(UpperCamelCase_)) __UpperCAmelCase : Optional[Any] = np.stack(np.tile(UpperCamelCase_ , UpperCamelCase_)) __UpperCAmelCase : int = np.pad(UpperCamelCase_ , (0, max_length - waveform.shape[0]) , mode="constant" , constant_values=0) if truncation == "fusion": __UpperCAmelCase : Any = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters) __UpperCAmelCase : List[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0) else: __UpperCAmelCase : Optional[int] = self._np_extract_fbank_features(UpperCamelCase_ , self.mel_filters_slaney)[None, :] return input_mel, longer def __call__( self : Dict , UpperCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase_ : str = None , UpperCamelCase_ : Optional[str] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[Union[str, TensorType]] = None , **UpperCamelCase_ : Any , ): """simple docstring""" __UpperCAmelCase : int = truncation if truncation is not None else self.truncation __UpperCAmelCase : Optional[Any] = padding if padding else self.padding if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a" F" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input" F" was sampled with {self.sampling_rate} and not {sampling_rate}.") else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug.") __UpperCAmelCase : List[str] = isinstance(UpperCamelCase_ , np.ndarray) and len(raw_speech.shape) > 1 if is_batched_numpy and len(raw_speech.shape) > 2: raise ValueError(F"Only mono-channel audio is supported for input to {self}") __UpperCAmelCase : str = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list))) ) if is_batched: __UpperCAmelCase : Dict = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray): __UpperCAmelCase : Tuple = np.asarray(UpperCamelCase_ , dtype=np.floataa) elif isinstance(UpperCamelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa): __UpperCAmelCase : Optional[int] = raw_speech.astype(np.floataa) # always return batch if not is_batched: __UpperCAmelCase : int = [np.asarray(UpperCamelCase_)] # convert to mel spectrogram, truncate and pad if needed. 
__UpperCAmelCase : Optional[int] = [ self._get_input_mel(UpperCamelCase_ , max_length if max_length else self.nb_max_samples , UpperCamelCase_ , UpperCamelCase_) for waveform in raw_speech ] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : List[Any] = [] for mel, longer in padded_inputs: input_mel.append(UpperCamelCase_) is_longer.append(UpperCamelCase_) if truncation == "fusion" and sum(UpperCamelCase_) == 0: # if no audio is longer than 10s, then randomly select one audio to be longer __UpperCAmelCase : Any = np.random.randint(0 , len(UpperCamelCase_)) __UpperCAmelCase : Optional[int] = True if isinstance(input_mel[0] , UpperCamelCase_): __UpperCAmelCase : Tuple = [np.asarray(UpperCamelCase_ , dtype=np.floataa) for feature in input_mel] # is_longer is a list of bool __UpperCAmelCase : List[str] = [[longer] for longer in is_longer] __UpperCAmelCase : Optional[int] = {"input_features": input_mel, "is_longer": is_longer} __UpperCAmelCase : Optional[int] = BatchFeature(UpperCamelCase_) if return_tensors is not None: __UpperCAmelCase : Any = input_features.convert_to_tensors(UpperCamelCase_) return input_features
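# ---------------------------------------------------------------------------
# A short usage sketch for the extractor defined above. The checkpoint name
# "laion/clap-htsat-unfused" is an assumption for illustration; the shape
# comments follow from the fusion branch of _get_input_mel (four stacked mel
# views), not from any documented guarantee.
# ---------------------------------------------------------------------------
import numpy as np
from transformers import ClapFeatureExtractor

feature_extractor = ClapFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")

# 3 s of silence at the native 48 kHz rate: shorter than max_length_s=10,
# so the default "repeatpad" padding branch above is exercised.
waveform = np.zeros(3 * 48_000, dtype=np.float64)
features = feature_extractor(waveform, sampling_rate=48_000, return_tensors="pt")

# With the default truncation="fusion", four mel views are stacked per clip,
# giving (batch, 4, frames, 64). When no clip exceeds 10 s, __call__ randomly
# flips one is_longer entry to True (see the fusion branch above).
print(features["input_features"].shape)
print(features["is_longer"])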
77
0