code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 SCREAMING_SNAKE_CASE : int = get_tests_dir("fixtures/dummy_feature_extractor_config.json") SCREAMING_SNAKE_CASE : Optional[Any] = get_tests_dir("fixtures/vocab.json") SCREAMING_SNAKE_CASE : int = get_tests_dir("fixtures") class _lowerCamelCase( unittest.TestCase ): lowercase_ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" _lowercase : Optional[int] = 0 def UpperCamelCase ( self) -> List[str]: """simple docstring""" _lowercase : Optional[int] = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h') self.assertIsInstance(lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : Union[str, Any] = WavaVecaConfig() _lowercase : int = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h') # save in new folder model_config.save_pretrained(lowerCamelCase) processor.save_pretrained(lowerCamelCase) _lowercase : Optional[int] = AutoProcessor.from_pretrained(lowerCamelCase) self.assertIsInstance(lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> Any: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(lowerCamelCase, os.path.join(lowerCamelCase, lowerCamelCase)) copyfile(lowerCamelCase, os.path.join(lowerCamelCase, 'vocab.json')) _lowercase : Optional[Any] = AutoProcessor.from_pretrained(lowerCamelCase) self.assertIsInstance(lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : Any = WavaVecaFeatureExtractor() _lowercase : int = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h') _lowercase : List[Any] = WavaVecaProcessor(lowerCamelCase, lowerCamelCase) # save in new folder processor.save_pretrained(lowerCamelCase) # drop `processor_class` in tokenizer with open(os.path.join(lowerCamelCase, lowerCamelCase), 'r') as f: _lowercase : Union[str, Any] = json.load(lowerCamelCase) config_dict.pop('processor_class') with open(os.path.join(lowerCamelCase, lowerCamelCase), 'w') as f: f.write(json.dumps(lowerCamelCase)) _lowercase : Any = AutoProcessor.from_pretrained(lowerCamelCase) self.assertIsInstance(lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> int: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : List[str] = WavaVecaFeatureExtractor() _lowercase : int = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h') _lowercase : Optional[Any] = WavaVecaProcessor(lowerCamelCase, lowerCamelCase) # save in new folder processor.save_pretrained(lowerCamelCase) # drop `processor_class` in feature extractor with open(os.path.join(lowerCamelCase, lowerCamelCase), 'r') as f: _lowercase : Optional[Any] = json.load(lowerCamelCase) config_dict.pop('processor_class') with open(os.path.join(lowerCamelCase, lowerCamelCase), 'w') as f: f.write(json.dumps(lowerCamelCase)) _lowercase : Optional[Any] = AutoProcessor.from_pretrained(lowerCamelCase) self.assertIsInstance(lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: _lowercase : str = WavaVecaConfig(processor_class='Wav2Vec2Processor') model_config.save_pretrained(lowerCamelCase) # copy relevant files copyfile(lowerCamelCase, os.path.join(lowerCamelCase, 'vocab.json')) # create emtpy sample processor with open(os.path.join(lowerCamelCase, lowerCamelCase), 'w') as f: f.write('{}') _lowercase : Optional[Any] = AutoProcessor.from_pretrained(lowerCamelCase) self.assertIsInstance(lowerCamelCase, lowerCamelCase) def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" with self.assertRaises(lowerCamelCase): _lowercase : Dict = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor') # If remote code is disabled, we can't load this config. with self.assertRaises(lowerCamelCase): _lowercase : str = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase) _lowercase : Optional[Any] = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase) self.assertTrue(processor.special_attribute_present) self.assertEqual(processor.__class__.__name__, 'NewProcessor') _lowercase : int = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present) self.assertEqual(feature_extractor.__class__.__name__, 'NewFeatureExtractor') _lowercase : Union[str, Any] = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizerFast') # Test we can also load the slow version _lowercase : str = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase, use_fast=lowerCamelCase) _lowercase : int = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present) self.assertEqual(new_tokenizer.__class__.__name__, 'NewTokenizer') else: self.assertEqual(tokenizer.__class__.__name__, 'NewTokenizer') def UpperCamelCase ( self) -> List[str]: """simple docstring""" try: AutoConfig.register('custom', lowerCamelCase) AutoFeatureExtractor.register(lowerCamelCase, lowerCamelCase) AutoTokenizer.register(lowerCamelCase, slow_tokenizer_class=lowerCamelCase) AutoProcessor.register(lowerCamelCase, lowerCamelCase) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase): AutoProcessor.register(lowerCamelCase, lowerCamelCase) # Now that the config is registered, it can be used as any other config with the auto-API _lowercase : Union[str, Any] = CustomFeatureExtractor.from_pretrained(lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: _lowercase : List[str] = os.path.join(lowerCamelCase, 'vocab.txt') with open(lowerCamelCase, 'w', encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens])) _lowercase : Optional[int] = CustomTokenizer(lowerCamelCase) _lowercase : int = CustomProcessor(lowerCamelCase, lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(lowerCamelCase) _lowercase : Optional[int] = AutoProcessor.from_pretrained(lowerCamelCase) self.assertIsInstance(lowerCamelCase, lowerCamelCase) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase ( self) -> Dict: """simple docstring""" class _lowerCamelCase( _a ): lowercase_ : List[str] = False class _lowerCamelCase( _a ): lowercase_ : Optional[Any] = False class _lowerCamelCase( _a ): lowercase_ : Any = """AutoFeatureExtractor""" lowercase_ : Tuple = """AutoTokenizer""" lowercase_ : int = False try: AutoConfig.register('custom', lowerCamelCase) AutoFeatureExtractor.register(lowerCamelCase, lowerCamelCase) AutoTokenizer.register(lowerCamelCase, slow_tokenizer_class=lowerCamelCase) AutoProcessor.register(lowerCamelCase, lowerCamelCase) # If remote code is not set, the default is to use local classes. _lowercase : int = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor') self.assertEqual(processor.__class__.__name__, 'NewProcessor') self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) # If remote code is disabled, we load the local ones. _lowercase : Tuple = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase) self.assertEqual(processor.__class__.__name__, 'NewProcessor') self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) # If remote is enabled, we load from the Hub. _lowercase : int = AutoProcessor.from_pretrained( 'hf-internal-testing/test_dynamic_processor', trust_remote_code=lowerCamelCase) self.assertEqual(processor.__class__.__name__, 'NewProcessor') self.assertTrue(processor.special_attribute_present) self.assertTrue(processor.feature_extractor.special_attribute_present) self.assertTrue(processor.tokenizer.special_attribute_present) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" _lowercase : Any = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert') self.assertEqual(processor.__class__.__name__, 'BertTokenizerFast') def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : List[str] = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext') self.assertEqual(processor.__class__.__name__, 'ConvNextImageProcessor') @is_staging_test class _lowerCamelCase( unittest.TestCase ): lowercase_ : Optional[int] = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """bla""", """blou"""] @classmethod def UpperCamelCase ( cls) -> Optional[int]: """simple docstring""" _lowercase : int = TOKEN HfFolder.save_token(lowerCamelCase) @classmethod def UpperCamelCase ( cls) -> Optional[int]: """simple docstring""" try: delete_repo(token=cls._token, repo_id='test-processor') except HTTPError: pass try: delete_repo(token=cls._token, repo_id='valid_org/test-processor-org') except HTTPError: pass try: delete_repo(token=cls._token, repo_id='test-dynamic-processor') except HTTPError: pass def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Tuple = WavaVecaProcessor.from_pretrained(lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowerCamelCase, 'test-processor'), push_to_hub=lowerCamelCase, use_auth_token=self._token) _lowercase : Tuple = WavaVecaProcessor.from_pretrained(F'''{USER}/test-processor''') for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase, getattr(new_processor.feature_extractor, lowerCamelCase)) self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab()) def UpperCamelCase ( self) -> int: """simple docstring""" _lowercase : List[Any] = WavaVecaProcessor.from_pretrained(lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(lowerCamelCase, 'test-processor-org'), push_to_hub=lowerCamelCase, use_auth_token=self._token, organization='valid_org', ) _lowercase : Union[str, Any] = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org') for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(lowerCamelCase, getattr(new_processor.feature_extractor, lowerCamelCase)) self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab()) def UpperCamelCase ( self) -> Any: """simple docstring""" CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() _lowercase : int = CustomFeatureExtractor.from_pretrained(lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: _lowercase : Optional[int] = os.path.join(lowerCamelCase, 'vocab.txt') with open(lowerCamelCase, 'w', encoding='utf-8') as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens])) _lowercase : Tuple = CustomTokenizer(lowerCamelCase) _lowercase : Union[str, Any] = CustomProcessor(lowerCamelCase, lowerCamelCase) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(F'''{USER}/test-dynamic-processor''', token=self._token) _lowercase : Dict = Repository(lowerCamelCase, clone_from=F'''{USER}/test-dynamic-processor''', token=self._token) processor.save_pretrained(lowerCamelCase) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map, { 'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor', 'AutoProcessor': 'custom_processing.CustomProcessor', }, ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(lowerCamelCase, 'tokenizer_config.json')) as f: _lowercase : int = json.load(lowerCamelCase) self.assertDictEqual( tokenizer_config['auto_map'], { 'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None], 'AutoProcessor': 'custom_processing.CustomProcessor', }, ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, 'custom_feature_extraction.py'))) self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, 'custom_tokenization.py'))) self.assertTrue(os.path.isfile(os.path.join(lowerCamelCase, 'custom_processing.py'))) repo.push_to_hub() _lowercase : List[str] = AutoProcessor.from_pretrained(F'''{USER}/test-dynamic-processor''', trust_remote_code=lowerCamelCase) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__, 'CustomProcessor')
21
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): '''simple docstring''' _a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: _a : Tuple = 1 - (matter_density + radiation_density + dark_energy) _a : int = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _a : List[str] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _snake_case = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
294
0
'''simple docstring''' from __future__ import annotations def UpperCAmelCase_ ( __lowercase : list[float] , __lowercase : list[float] ) -> float: '''simple docstring''' _UpperCAmelCase = sorted(numsa + numsa ) _UpperCAmelCase , _UpperCAmelCase = divmod(len(__lowercase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE :Tuple = [float(x) for x in input('''Enter the elements of first array: ''').split()] __SCREAMING_SNAKE_CASE :Any = [float(x) for x in input('''Enter the elements of second array: ''').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
22
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : List[Any] ) -> Dict: _a : Optional[int] = tempfile.mkdtemp() _a : Optional[Any] = SamImageProcessor() _a : int = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : str ) -> int: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Tuple ) -> Dict: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Dict ) -> Dict: _a : List[Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) -> Tuple: _a : Optional[Any] = self.get_image_processor() _a : int = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Union[str, Any] = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def _lowercase ( self : Optional[Any] ) -> Optional[Any]: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = [torch.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : Optional[int] = [[683, 1024]] _a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : int = processor.post_process_masks( UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : Optional[Any] = [np.ones((1, 3, 5, 5) )] _a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : List[str] = [[1, 0], [0, 1]] with self.assertRaises(UpperCAmelCase__ ): _a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) @require_vision @require_tf class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Any ) -> List[str]: _a : List[str] = tempfile.mkdtemp() _a : Any = SamImageProcessor() _a : Union[str, Any] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Dict ) -> List[str]: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: _a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> str: _a : Union[str, Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : int = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def _lowercase ( self : Optional[Any] ) -> int: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Any = [tf.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : str = [[683, 1024]] _a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Union[str, Any] = processor.post_process_masks( UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : List[Any] = [np.ones((1, 3, 5, 5) )] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Dict = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): _a : List[Any] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : str ) -> Optional[Any]: _a : Optional[Any] = tempfile.mkdtemp() _a : Dict = SamImageProcessor() _a : List[str] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Tuple ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : str ) -> int: _a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _lowercase ( self : int ) -> List[Any]: _a : Optional[Any] = self.get_image_processor() _a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) _a : str = [tf.convert_to_tensor(UpperCAmelCase__ )] _a : Optional[int] = [torch.tensor(UpperCAmelCase__ )] _a : Union[str, Any] = [[1764, 2646]] _a : List[str] = [[683, 1024]] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) _a : List[str] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _lowercase ( self : str ) -> Optional[Any]: _a : List[Any] = self.get_image_processor() _a : Any = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Dict = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() _a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
294
0
'''simple docstring''' from itertools import product def snake_case_ ( _lowerCAmelCase : int , _lowerCAmelCase : int ) -> list[int]: UpperCAmelCase : Union[str, Any] = sides_number UpperCAmelCase : Optional[Any] = max_face_number * dice_number UpperCAmelCase : Tuple = [0] * (max_total + 1) UpperCAmelCase : Optional[int] = 1 UpperCAmelCase : Union[str, Any] = range(_lowerCAmelCase , max_face_number + 1 ) for dice_numbers in product(_lowerCAmelCase , repeat=_lowerCAmelCase ): UpperCAmelCase : Optional[Any] = sum(_lowerCAmelCase ) totals_frequencies[total] += 1 return totals_frequencies def snake_case_ ( ) -> float: UpperCAmelCase : str = total_frequency_distribution( sides_number=4 , dice_number=9 ) UpperCAmelCase : int = total_frequency_distribution( sides_number=6 , dice_number=6 ) UpperCAmelCase : str = 0 UpperCAmelCase : List[Any] = 9 UpperCAmelCase : Optional[Any] = 4 * 9 UpperCAmelCase : Tuple = 6 for peter_total in range(_lowerCAmelCase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) UpperCAmelCase : Optional[int] = (4**9) * (6**6) UpperCAmelCase : Union[str, Any] = peter_wins_count / total_games_number UpperCAmelCase : Dict = round(_lowerCAmelCase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(F"{solution() = }")
23
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _snake_case = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } _snake_case = { '169M': 768, '430M': 1024, '1B5': 2048, '3B': 2560, '7B': 4096, '14B': 5120, } def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : int = list(state_dict.keys() ) for name in state_dict_keys: _a : str = state_dict.pop(UpperCamelCase__ ) # emb -> embedding if name.startswith("""emb.""" ): _a : Dict = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): _a : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention _a : Any = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCamelCase__ ) # ffn -> feed_forward _a : int = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCamelCase__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): _a : List[str] = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): _a : Tuple = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): _a : Dict = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": _a : Optional[int] = """rwkv.""" + name _a : Any = weight return state_dict def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=None ): '''simple docstring''' # 1. If possible, build the tokenizer. if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) _a : Tuple = 5_0_2_7_7 _a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: _a : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase__ ) _a : int = len(UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) # 2. Build the config _a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: _a : Tuple = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) _a : List[Any] = RwkvConfig( vocab_size=UpperCamelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(UpperCamelCase__ ) # 3. Download model file then convert state_dict _a : str = hf_hub_download(UpperCamelCase__ , UpperCamelCase__ ) _a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : List[str] = convert_state_dict(UpperCamelCase__ ) # 4. Split in shards and save _a , _a : List[str] = shard_checkpoint(UpperCamelCase__ ) for shard_file, shard in shards.items(): torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if index is not None: _a : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) # Save the index as well with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f: _a : Dict = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n""" f.write(UpperCamelCase__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" ) _a : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: _a : Any = torch.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) _a : Dict = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ , max_shard_size="""2GB""" ) tokenizer.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) _snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
294
0
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() snake_case_ = logging.get_logger(__name__) snake_case_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'encoder.layer_norm_for_extract': 'layer_norm_for_extract', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'lm_head', 'label_embs_concat': 'label_embeddings_concat', 'mask_emb': 'masked_spec_embed', 'spk_proj': 'speaker_proj', } snake_case_ = [ 'lm_head', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', 'label_embeddings_concat', 'speaker_proj', 'layer_norm_for_extract', ] def lowerCamelCase__ ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : List[str] , snake_case_ : str ) -> List[str]: for attribute in key.split('''.''' ): __snake_case = getattr(snake_case_ , snake_case_ ) if weight_type is not None: __snake_case = getattr(snake_case_ , snake_case_ ).shape else: __snake_case = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __snake_case = value elif weight_type == "weight_g": __snake_case = value elif weight_type == "weight_v": __snake_case = value elif weight_type == "bias": __snake_case = value else: __snake_case = value logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" ) def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : List[str] ) -> List[str]: __snake_case = [] __snake_case = fairseq_model.state_dict() __snake_case = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): __snake_case = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == '''group''' , ) __snake_case = True else: for key, mapped_key in MAPPING.items(): __snake_case = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key): # special case since naming is very similar continue __snake_case = True if "*" in mapped_key: __snake_case = name.split(snake_case_ )[0].split('''.''' )[-2] __snake_case = mapped_key.replace('''*''' , snake_case_ ) if "weight_g" in name: __snake_case = '''weight_g''' elif "weight_v" in name: __snake_case = '''weight_v''' elif "bias" in name: __snake_case = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __snake_case = '''weight''' else: __snake_case = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowerCamelCase__ ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Optional[int] ) -> List[Any]: __snake_case = full_name.split('''conv_layers.''' )[-1] __snake_case = name.split('''.''' ) __snake_case = int(items[0] ) __snake_case = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __snake_case = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __snake_case = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.""" ) __snake_case = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __snake_case = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : int=None , snake_case_ : str=None , snake_case_ : Optional[int]=True ) -> int: if config_path is not None: __snake_case = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: __snake_case = UniSpeechSatConfig() __snake_case = '''''' if is_finetuned: __snake_case = UniSpeechSatForCTC(snake_case_ ) else: __snake_case = UniSpeechSatForPreTraining(snake_case_ ) __snake_case , __snake_case , __snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) __snake_case = model[0].eval() recursively_load_weights(snake_case_ , snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": snake_case_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) snake_case_ = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
24
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> List[str]: _a : Any = """laion/clap-htsat-unfused""" _a : Union[str, Any] = tempfile.mkdtemp() def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict: return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : List[str] ) -> Optional[int]: _a : List[str] = self.get_tokenizer() _a : Any = self.get_feature_extractor() _a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) _a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> Optional[int]: _a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> Optional[Any]: _a : Optional[int] = self.get_feature_extractor() _a : Tuple = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = floats_list((3, 1000) ) _a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Tuple ) -> Optional[int]: _a : List[str] = self.get_feature_extractor() _a : Any = self.get_tokenizer() _a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Optional[int] = """This is a test string""" _a : Tuple = processor(text=UpperCAmelCase__ ) _a : int = tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : List[Any] ) -> Any: _a : str = self.get_feature_extractor() _a : List[str] = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : Dict = processor.batch_decode(UpperCAmelCase__ ) _a : Any = tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> List[str]: _a : str = self.get_feature_extractor() _a : Optional[Any] = self.get_tokenizer() _a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
294
0
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCAmelCase__ : Optional[Any] = 'src/diffusers' UpperCAmelCase__ : Any = '.' # This is to make sure the diffusers module imported is the one in the repo. UpperCAmelCase__ : int = importlib.util.spec_from_file_location( 'diffusers', os.path.join(DIFFUSERS_PATH, '__init__.py'), submodule_search_locations=[DIFFUSERS_PATH], ) UpperCAmelCase__ : Optional[int] = spec.loader.load_module() def lowercase_ ( _snake_case ,_snake_case ): return line.startswith(_snake_case ) or len(_snake_case ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" ,_snake_case ) is not None def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : int = object_name.split(""".""" ) SCREAMING_SNAKE_CASE__ : List[Any] = 0 # First let's find the module where our object lives. SCREAMING_SNAKE_CASE__ : Dict = parts[i] while i < len(_snake_case ) and not os.path.isfile(os.path.join(_snake_case ,f'''{module}.py''' ) ): i += 1 if i < len(_snake_case ): SCREAMING_SNAKE_CASE__ : str = os.path.join(_snake_case ,parts[i] ) if i >= len(_snake_case ): raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(_snake_case ,f'''{module}.py''' ) ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: SCREAMING_SNAKE_CASE__ : List[Any] = f.readlines() # Now let's find the class / func in the code! SCREAMING_SNAKE_CASE__ : str = """""" SCREAMING_SNAKE_CASE__ : Dict = 0 for name in parts[i + 1 :]: while ( line_index < len(_snake_case ) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' ,lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(_snake_case ): raise ValueError(f''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). SCREAMING_SNAKE_CASE__ : List[Any] = line_index while line_index < len(_snake_case ) and _should_continue(lines[line_index] ,_snake_case ): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 SCREAMING_SNAKE_CASE__ : List[Any] = lines[start_index:line_index] return "".join(_snake_case ) UpperCAmelCase__ : Optional[int] = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)') UpperCAmelCase__ : Any = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)') UpperCAmelCase__ : Optional[Any] = re.compile(r'<FILL\s+[^>]*>') def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : Dict = code.split("""\n""" ) SCREAMING_SNAKE_CASE__ : str = 0 while idx < len(_snake_case ) and len(lines[idx] ) == 0: idx += 1 if idx < len(_snake_case ): return re.search(R"""^(\s*)\S""" ,lines[idx] ).groups()[0] return "" def lowercase_ ( _snake_case ): SCREAMING_SNAKE_CASE__ : List[str] = len(get_indent(_snake_case ) ) > 0 if has_indent: SCREAMING_SNAKE_CASE__ : Dict = f'''class Bla:\n{code}''' SCREAMING_SNAKE_CASE__ : Dict = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ,preview=_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = black.format_str(_snake_case ,mode=_snake_case ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = style_docstrings_in_code(_snake_case ) return result[len("""class Bla:\n""" ) :] if has_indent else result def lowercase_ ( _snake_case ,_snake_case=False ): with open(_snake_case ,"""r""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] = f.readlines() SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : List[str] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(_snake_case ): SCREAMING_SNAKE_CASE__ : Optional[int] = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = search.groups() SCREAMING_SNAKE_CASE__ : Optional[Any] = find_code_in_diffusers(_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = get_indent(_snake_case ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 SCREAMING_SNAKE_CASE__ : List[str] = theoretical_indent SCREAMING_SNAKE_CASE__ : int = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. SCREAMING_SNAKE_CASE__ : Union[str, Any] = True while line_index < len(_snake_case ) and should_continue: line_index += 1 if line_index >= len(_snake_case ): break SCREAMING_SNAKE_CASE__ : List[str] = lines[line_index] SCREAMING_SNAKE_CASE__ : Tuple = _should_continue(_snake_case ,_snake_case ) and re.search(f'''^{indent}# End copy''' ,_snake_case ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 SCREAMING_SNAKE_CASE__ : Tuple = lines[start_index:line_index] SCREAMING_SNAKE_CASE__ : Any = """""".join(_snake_case ) # Remove any nested `Copied from` comments to avoid circular copies SCREAMING_SNAKE_CASE__ : Union[str, Any] = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(_snake_case ) is None] SCREAMING_SNAKE_CASE__ : Tuple = """\n""".join(_snake_case ) # Before comparing, use the `replace_pattern` on the original code. if len(_snake_case ) > 0: SCREAMING_SNAKE_CASE__ : str = replace_pattern.replace("""with""" ,"""""" ).split(""",""" ) SCREAMING_SNAKE_CASE__ : List[str] = [_re_replace_pattern.search(_snake_case ) for p in patterns] for pattern in patterns: if pattern is None: continue SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = pattern.groups() SCREAMING_SNAKE_CASE__ : Dict = re.sub(_snake_case ,_snake_case ,_snake_case ) if option.strip() == "all-casing": SCREAMING_SNAKE_CASE__ : List[Any] = re.sub(obja.lower() ,obja.lower() ,_snake_case ) SCREAMING_SNAKE_CASE__ : Optional[Any] = re.sub(obja.upper() ,obja.upper() ,_snake_case ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line SCREAMING_SNAKE_CASE__ : Union[str, Any] = blackify(lines[start_index - 1] + theoretical_code ) SCREAMING_SNAKE_CASE__ : Optional[Any] = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: SCREAMING_SNAKE_CASE__ : List[str] = lines[:start_index] + [theoretical_code] + lines[line_index:] SCREAMING_SNAKE_CASE__ : Optional[int] = start_index + 1 if overwrite and len(_snake_case ) > 0: # Warn the user a file has been modified. print(f'''Detected changes, rewriting {filename}.''' ) with open(_snake_case ,"""w""" ,encoding="""utf-8""" ,newline="""\n""" ) as f: f.writelines(_snake_case ) return diffs def lowercase_ ( _snake_case = False ): SCREAMING_SNAKE_CASE__ : Optional[Any] = glob.glob(os.path.join(_snake_case ,"""**/*.py""" ) ,recursive=_snake_case ) SCREAMING_SNAKE_CASE__ : List[Any] = [] for filename in all_files: SCREAMING_SNAKE_CASE__ : Tuple = is_copy_consistent(_snake_case ,_snake_case ) diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(_snake_case ) > 0: SCREAMING_SNAKE_CASE__ : List[Any] = """\n""".join(_snake_case ) raise Exception( """Found the following copy inconsistencies:\n""" + diff + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" ) if __name__ == "__main__": UpperCAmelCase__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') UpperCAmelCase__ : Dict = parser.parse_args() check_copies(args.fix_and_overwrite)
25
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _snake_case = logging.get_logger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
294
0
from __future__ import annotations from scipy.special import comb # type: ignore class lowercase : def __init__( self , _a ) -> Union[str, Any]: _A : Tuple = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. _A : Dict = len(_a ) - 1 def a__ ( self , _a ) -> list[float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." _A : list[float] = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , _a ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(_a ) , 5 ) == 1 return output_values def a__ ( self , _a ) -> tuple[float, float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." _A : Optional[int] = self.basis_function(_a ) _A : List[str] = 0.0 _A : Tuple = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def a__ ( self , _a = 0.01 ) -> str: from matplotlib import pyplot as plt # type: ignore _A : list[float] = [] # x coordinates of points to plot _A : list[float] = [] # y coordinates of points to plot _A : str = 0.0 while t <= 1: _A : Optional[Any] = self.bezier_curve_function(_a ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size _A : Union[str, Any] = [i[0] for i in self.list_of_points] _A : str = [i[1] for i in self.list_of_points] plt.plot( _a , _a , color="""blue""" , label="""Curve of Degree """ + str(self.degree ) , ) plt.scatter(_a , _a , color="""red""" , label="""Control Points""" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
26
"""simple docstring""" import unittest import numpy as np def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ): '''simple docstring''' _a : List[Any] = np.shape(UpperCamelCase__ ) _a : Any = np.shape(UpperCamelCase__ ) _a : Union[str, Any] = np.shape(UpperCamelCase__ ) if shape_a[0] != shape_b[0]: _a : int = ( """Expected the same number of rows for A and B. """ F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(UpperCamelCase__ ) if shape_b[1] != shape_c[1]: _a : Tuple = ( """Expected the same number of columns for B and C. """ F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(UpperCamelCase__ ) _a : int = pseudo_inv if a_inv is None: try: _a : Optional[int] = np.linalg.inv(UpperCamelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> None: _a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Optional[int] = np.array([[2, 1], [6, 3]] ) _a : Optional[Any] = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) _a : Union[str, Any] = np.block([[a, b], [b.T, c]] ) _a : int = np.linalg.det(UpperCAmelCase__ ) _a : Union[str, Any] = np.linalg.det(UpperCAmelCase__ ) _a : List[Any] = np.linalg.det(UpperCAmelCase__ ) self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s ) def _lowercase ( self : int ) -> None: _a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> None: _a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Dict = np.array([[0, 3], [3, 0], [2, 3]] ) _a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
294
0
'''simple docstring''' import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __lowercase : List[str] = datasets.logging.get_logger(__name__) __lowercase : List[str] = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' __lowercase : Dict = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' __lowercase : Optional[int] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n' __lowercase : str = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __UpperCamelCase ( datasets.Metric ): def __UpperCAmelCase ( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/google-research/bleurt' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/google-research/bleurt'] , reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'] , ) def __UpperCAmelCase ( self , __a ): '''simple docstring''' if self.config_name == "default": logger.warning( 'Using default BLEURT-Base checkpoint for sequence maximum length 128. ' 'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').' ) __a : int = 'bleurt-base-128' if self.config_name.lower() in CHECKPOINT_URLS: __a : str = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: __a : List[str] = self.config_name.upper() else: raise KeyError( f"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer __a : Optional[int] = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) __a : Optional[int] = score.BleurtScorer(os.path.join(__a , __a ) ) def __UpperCAmelCase ( self , __a , __a ): '''simple docstring''' __a : Union[str, Any] = self.scorer.score(references=__a , candidates=__a ) return {"scores": scores}
27
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = torch.device('cpu') def lowerCAmelCase__ ( ): '''simple docstring''' _a : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" _a : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Any = dct.pop(UpperCamelCase__ ) _a : Dict = val def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : Tuple = [] for k in state_dict.keys(): _a : Any = k if ".pwconv" in k: _a : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _a : List[str] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: _a : Optional[int] = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _a : Tuple = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _a : int = k_new.split(""".""" ) if ls[2].isdigit(): _a : Union[str, Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _a : Tuple = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _a : Optional[int] = 1_0_0_0 _a : Optional[Any] = """huggingface/label-files""" _a : Optional[Any] = """imagenet-1k-id2label.json""" _a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) _a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _a : Dict = idalabel _a : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _a : Any = [3, 3, 6, 4] _a : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": _a : Any = [3, 3, 9, 6] _a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": _a : List[Any] = [4, 3, 1_0, 5] _a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": _a : List[Any] = [4, 4, 1_2, 6] _a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ ) else: _a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : int = checkpoint _a : Optional[Any] = create_rename_keys(UpperCamelCase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # load HuggingFace model _a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval() hf_model.load_state_dict(UpperCamelCase__ ) # prepare test inputs _a : Any = prepare_img() _a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" ) # compare outputs from both models _a : Dict = get_expected_output(UpperCamelCase__ ) _a : int = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') _snake_case = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
294
0
'''simple docstring''' from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Features, Sequence, Value from .base import TaskTemplate @dataclass(frozen=_a ) class SCREAMING_SNAKE_CASE ( _a ): """simple docstring""" _SCREAMING_SNAKE_CASE = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) _SCREAMING_SNAKE_CASE = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} ) _SCREAMING_SNAKE_CASE = Features( { """answers""": Sequence( { """text""": Value("""string""" ), """answer_start""": Value("""int32""" ), } ) } ) _SCREAMING_SNAKE_CASE = "question" _SCREAMING_SNAKE_CASE = "context" _SCREAMING_SNAKE_CASE = "answers" @property def A ( self : Dict ): """simple docstring""" return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
28
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['PerceiverFeatureExtractor'] _snake_case = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __UpperCAmelCase = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys __UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
29
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict: if k in (0.0_4, 0.0_6): _a : List[str] = k _a : List[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Dict ) -> str: return str(self.k ) def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]: _a : Dict = cva.imread(UpperCAmelCase__ , 0 ) _a , _a : List[Any] = img.shape _a : list[list[int]] = [] _a : List[Any] = img.copy() _a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB ) _a , _a : Any = np.gradient(UpperCAmelCase__ ) _a : Tuple = dx**2 _a : Union[str, Any] = dy**2 _a : Union[str, Any] = dx * dy _a : int = 0.0_4 _a : List[str] = self.window_size // 2 for y in range(UpperCAmelCase__ , h - offset ): for x in range(UpperCAmelCase__ , w - offset ): _a : str = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : List[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Tuple = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Any = (wxx * wyy) - (wxy**2) _a : Tuple = wxx + wyy _a : Any = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _snake_case = HarrisCorner(0.04, 3) _snake_case , _snake_case = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
294
0
import torch from torch import nn class lowercase__( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1 , SCREAMING_SNAKE_CASE_ : Optional[int]=False ) -> Any: super().__init__() lowercase_ = n_token lowercase_ = d_embed lowercase_ = d_proj lowercase_ = cutoffs + [n_token] lowercase_ = [0] + self.cutoffs lowercase_ = div_val lowercase_ = self.cutoffs[0] lowercase_ = len(self.cutoffs ) - 1 lowercase_ = self.shortlist_size + self.n_clusters if self.n_clusters > 0: lowercase_ = nn.Parameter(torch.zeros(self.n_clusters , self.d_embed ) ) lowercase_ = nn.Parameter(torch.zeros(self.n_clusters ) ) lowercase_ = nn.ModuleList() lowercase_ = nn.ParameterList() if div_val == 1: for i in range(len(self.cutoffs ) ): if d_proj != d_embed: self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) else: self.out_projs.append(SCREAMING_SNAKE_CASE_ ) self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) else: for i in range(len(self.cutoffs ) ): lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ = d_embed // (div_val**i) self.out_projs.append(nn.Parameter(torch.FloatTensor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ) self.out_layers.append(nn.Linear(SCREAMING_SNAKE_CASE_ , r_idx - l_idx ) ) lowercase_ = keep_order def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> Tuple: if proj is None: lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) else: # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1: lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , proj.t().contiguous() ) lowercase_ = nn.functional.linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ ) # else: # logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t())) # if bias is not None: # logit = logit + bias return logit def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ) -> Optional[Any]: if labels is not None: # Shift so that tokens < n predict n lowercase_ = hidden[..., :-1, :].contiguous() lowercase_ = labels[..., 1:].contiguous() lowercase_ = hidden.view(-1 , hidden.size(-1 ) ) lowercase_ = labels.view(-1 ) if hidden.size(0 ) != labels.size(0 ): raise RuntimeError('''Input and labels should have the same size in the batch dimension.''' ) else: lowercase_ = hidden.view(-1 , hidden.size(-1 ) ) if self.n_clusters == 0: lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) if labels is not None: lowercase_ = labels != -1_0_0 lowercase_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device ) lowercase_ = ( -nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 )[mask].gather(1 , labels[mask].unsqueeze(1 ) ).squeeze(1 ) ) else: lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) else: # construct weights and biases lowercase_ , lowercase_ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ = self.out_layers[0].weight[l_idx:r_idx] lowercase_ = self.out_layers[0].bias[l_idx:r_idx] else: lowercase_ = self.out_layers[i].weight lowercase_ = self.out_layers[i].bias if i == 0: lowercase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowercase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(SCREAMING_SNAKE_CASE_ ) biases.append(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ , lowercase_ = weights[0], biases[0], self.out_projs[0] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) if labels is None: lowercase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) ) else: lowercase_ = torch.zeros_like(SCREAMING_SNAKE_CASE_ , dtype=hidden.dtype , device=hidden.device ) lowercase_ = 0 lowercase_ = [0] + self.cutoffs for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ): lowercase_ , lowercase_ = cutoff_values[i], cutoff_values[i + 1] if labels is not None: lowercase_ = (labels >= l_idx) & (labels < r_idx) lowercase_ = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue lowercase_ = labels.index_select(0 , SCREAMING_SNAKE_CASE_ ) - l_idx lowercase_ = head_logprob.index_select(0 , SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden.index_select(0 , SCREAMING_SNAKE_CASE_ ) else: lowercase_ = hidden if i == 0: if labels is not None: lowercase_ = head_logprob_i.gather(1 , target_i[:, None] ).squeeze(1 ) else: lowercase_ = head_logprob[:, : self.cutoffs[0]] else: lowercase_ , lowercase_ , lowercase_ = weights[i], biases[i], self.out_projs[i] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) lowercase_ = self.cutoffs[0] + i - 1 # No probability for the head cluster if labels is not None: lowercase_ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather( 1 , target_i[:, None] ).squeeze(1 ) else: lowercase_ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i lowercase_ = logprob_i if labels is not None: if (hasattr(self , '''keep_order''' ) and self.keep_order) or keep_order: out.index_copy_(0 , SCREAMING_SNAKE_CASE_ , -logprob_i ) else: out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i ) offset += logprob_i.size(0 ) return out def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: if self.n_clusters == 0: lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , self.out_layers[0].weight , self.out_layers[0].bias , self.out_projs[0] ) return nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=-1 ) else: # construct weights and biases lowercase_ , lowercase_ = [], [] for i in range(len(self.cutoffs ) ): if self.div_val == 1: lowercase_ , lowercase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1] lowercase_ = self.out_layers[0].weight[l_idx:r_idx] lowercase_ = self.out_layers[0].bias[l_idx:r_idx] else: lowercase_ = self.out_layers[i].weight lowercase_ = self.out_layers[i].bias if i == 0: lowercase_ = torch.cat([weight_i, self.cluster_weight] , dim=0 ) lowercase_ = torch.cat([bias_i, self.cluster_bias] , dim=0 ) weights.append(SCREAMING_SNAKE_CASE_ ) biases.append(SCREAMING_SNAKE_CASE_ ) lowercase_ , lowercase_ , lowercase_ = weights[0], biases[0], self.out_projs[0] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden.new_empty((head_logit.size(0 ), self.n_token) ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) lowercase_ = [0] + self.cutoffs for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ): lowercase_ , lowercase_ = cutoff_values[i], cutoff_values[i + 1] if i == 0: lowercase_ = head_logprob[:, : self.cutoffs[0]] else: lowercase_ , lowercase_ , lowercase_ = weights[i], biases[i], self.out_projs[i] lowercase_ = self._compute_logit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) lowercase_ = nn.functional.log_softmax(SCREAMING_SNAKE_CASE_ , dim=1 ) lowercase_ = head_logprob[:, -i] + tail_logprob_i lowercase_ = logprob_i return out
30
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' # Check if the input is valid if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients _a , _a , _a : Any = equationa _a , _a , _a : Tuple = equationa # Calculate the determinants of the matrices _a : int = aa * ba - aa * ba _a : str = ca * ba - ca * ba _a : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _a : Dict = determinant_x / determinant _a : str = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
294
0
'''simple docstring''' import unittest import numpy as np import torch from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' @property def _A ( self : List[str] ): torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) return model def _A ( self : int ): _UpperCAmelCase : Optional[Any] = self.dummy_uncond_unet _UpperCAmelCase : Any = ScoreSdeVeScheduler() _UpperCAmelCase : str = ScoreSdeVePipeline(unet=A , scheduler=A ) sde_ve.to(A ) sde_ve.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = torch.manual_seed(0 ) _UpperCAmelCase : List[str] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A ).images _UpperCAmelCase : Optional[int] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A , return_dict=A )[ 0 ] _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] _UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch class lowerCamelCase_ (unittest.TestCase ): '''simple docstring''' def _A ( self : int ): _UpperCAmelCase : int = "google/ncsnpp-church-256" _UpperCAmelCase : Any = UNetaDModel.from_pretrained(A ) _UpperCAmelCase : List[Any] = ScoreSdeVeScheduler.from_pretrained(A ) _UpperCAmelCase : Union[str, Any] = ScoreSdeVePipeline(unet=A , scheduler=A ) sde_ve.to(A ) sde_ve.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = torch.manual_seed(0 ) _UpperCAmelCase : Tuple = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=A ).images _UpperCAmelCase : int = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _UpperCAmelCase : int = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
31
"""simple docstring""" _snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}] _snake_case = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
294
0
import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset UpperCAmelCase_ : Tuple = 'bert-base-cased' UpperCAmelCase_ : str = 'google/pegasus-xsum' UpperCAmelCase_ : int = [' Sam ate lunch today.', 'Sams lunch ingredients.'] UpperCAmelCase_ : Optional[Any] = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee'] UpperCAmelCase_ : str = 'patrickvonplaten/t5-tiny-random' UpperCAmelCase_ : Optional[int] = 'sshleifer/bart-tiny-random' UpperCAmelCase_ : Optional[Any] = 'sshleifer/tiny-mbart' UpperCAmelCase_ : Optional[int] = 'sshleifer/tiny-marian-en-de' def SCREAMING_SNAKE_CASE_ ( __A : Path , __A : list ) -> Tuple: """simple docstring""" a_ : List[str] = '\n'.join(__A ) Path(__A ).open('w' ).writelines(__A ) def SCREAMING_SNAKE_CASE_ ( __A : Dict ) -> List[Any]: """simple docstring""" for split in ["train", "val", "test"]: _dump_articles(os.path.join(__A , F"""{split}.source""" ) , __A ) _dump_articles(os.path.join(__A , F"""{split}.target""" ) , __A ) return tmp_dir class SCREAMING_SNAKE_CASE__ ( lowercase__ ): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def SCREAMING_SNAKE_CASE ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Optional[Any]: a_ : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) a_ : Any = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in ARTICLES ) a_ : Tuple = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in SUMMARIES ) a_ : Optional[Any] = 4 a_ : str = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated a_ , a_ : str = 'ro_RO', 'de_DE' # ignored for all but mbart, but never causes error. a_ : str = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , src_lang=SCREAMING_SNAKE_CASE__ , tgt_lang=SCREAMING_SNAKE_CASE__ , ) a_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place a_ : Tuple = shift_tokens_right(batch['labels'] , tokenizer.pad_token_id ) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED] ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> List[Any]: a_ : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) a_ : Any = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in ARTICLES ) a_ : int = max(len(tokenizer.encode(SCREAMING_SNAKE_CASE__ ) ) for a in SUMMARIES ) a_ : List[Any] = 4 a_ : int = LegacySeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=2_0 , max_target_length=SCREAMING_SNAKE_CASE__ , ) a_ : Any = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=2 , collate_fn=train_dataset.collate_fn ) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def SCREAMING_SNAKE_CASE ( self : int ) -> Dict: a_ : int = AutoTokenizer.from_pretrained('facebook/mbart-large-cc25' ) a_ : Tuple = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) a_ : Union[str, Any] = tmp_dir.joinpath('train.source' ).open().readlines() a_ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) ) pack_data_dir(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 1_2_8 , SCREAMING_SNAKE_CASE__ ) a_ : List[str] = {x.name for x in tmp_dir.iterdir()} a_ : Optional[Any] = {x.name for x in save_dir.iterdir()} a_ : Optional[Any] = save_dir.joinpath('train.source' ).open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(SCREAMING_SNAKE_CASE__ ) < len(SCREAMING_SNAKE_CASE__ ) assert len(SCREAMING_SNAKE_CASE__ ) == 1 assert len(packed_examples[0] ) == sum(len(SCREAMING_SNAKE_CASE__ ) for x in orig_examples ) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='This test requires fairseq' ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]: if not FAIRSEQ_AVAILABLE: return a_ , a_ , a_ : Optional[int] = self._get_dataset(max_len=6_4 ) a_ : Tuple = 6_4 a_ : Any = ds.make_dynamic_sampler(SCREAMING_SNAKE_CASE__ , required_batch_size_multiple=SCREAMING_SNAKE_CASE__ ) a_ : List[Any] = [len(SCREAMING_SNAKE_CASE__ ) for x in batch_sampler] assert len(set(SCREAMING_SNAKE_CASE__ ) ) > 1 # it's not dynamic batch size if every batch is the same length assert sum(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) # no dropped or added examples a_ : Dict = DataLoader(SCREAMING_SNAKE_CASE__ , batch_sampler=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 ) a_ : Tuple = [] a_ : Optional[int] = [] for batch in data_loader: a_ : Any = batch['input_ids'].shape a_ : Any = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple a_ : List[Any] = np.product(batch['input_ids'].shape ) num_src_per_batch.append(SCREAMING_SNAKE_CASE__ ) if num_src_tokens > (max_tokens * 1.1): failures.append(SCREAMING_SNAKE_CASE__ ) assert num_src_per_batch[0] == max(SCREAMING_SNAKE_CASE__ ) if failures: raise AssertionError(F"""too many tokens in {len(SCREAMING_SNAKE_CASE__ )} batches""" ) def SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: a_ , a_ , a_ : Union[str, Any] = self._get_dataset(max_len=5_1_2 ) a_ : Union[str, Any] = 2 a_ : Tuple = ds.make_sortish_sampler(SCREAMING_SNAKE_CASE__ , shuffle=SCREAMING_SNAKE_CASE__ ) a_ : Tuple = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 ) a_ : Optional[Any] = DataLoader(SCREAMING_SNAKE_CASE__ , batch_size=SCREAMING_SNAKE_CASE__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = tokenizer.pad_token_id def count_pad_tokens(SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any="input_ids" ): return [batch[k].eq(SCREAMING_SNAKE_CASE__ ).sum().item() for batch in data_loader] assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ , k='labels' ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ , k='labels' ) ) assert sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ ) ) < sum(count_pad_tokens(SCREAMING_SNAKE_CASE__ ) ) assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any=1_0_0_0 , SCREAMING_SNAKE_CASE__ : Any=1_2_8 ) -> Dict: if os.getenv('USE_REAL_DATA' , SCREAMING_SNAKE_CASE__ ): a_ : Optional[Any] = 'examples/seq2seq/wmt_en_ro' a_ : Dict = max_len * 2 * 6_4 if not Path(SCREAMING_SNAKE_CASE__ ).joinpath('train.len' ).exists(): save_len_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else: a_ : Any = 'examples/seq2seq/test_data/wmt_en_ro' a_ : Tuple = max_len * 4 save_len_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) a_ : Any = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ) a_ : Optional[Any] = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=SCREAMING_SNAKE_CASE__ , type_path='train' , max_source_length=SCREAMING_SNAKE_CASE__ , max_target_length=SCREAMING_SNAKE_CASE__ , n_obs=SCREAMING_SNAKE_CASE__ , ) return ds, max_tokens, tokenizer def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple: a_ , a_ , a_ : Optional[int] = self._get_dataset() a_ : Optional[Any] = set(DistributedSortishSampler(SCREAMING_SNAKE_CASE__ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=SCREAMING_SNAKE_CASE__ ) ) a_ : List[Any] = set(DistributedSortishSampler(SCREAMING_SNAKE_CASE__ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=SCREAMING_SNAKE_CASE__ ) ) assert idsa.intersection(SCREAMING_SNAKE_CASE__ ) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def SCREAMING_SNAKE_CASE ( self : Tuple , SCREAMING_SNAKE_CASE__ : str ) -> str: a_ : str = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ , use_fast=SCREAMING_SNAKE_CASE__ ) if tok_name == MBART_TINY: a_ : Dict = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , src_lang='EN' , tgt_lang='FR' , ) a_ : Dict = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: a_ : Any = SeqaSeqDataset( SCREAMING_SNAKE_CASE__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='train' , max_source_length=4 , max_target_length=8 , ) a_ : List[Any] = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(SCREAMING_SNAKE_CASE__ ) == 1 if tok_name == BART_TINY else len(SCREAMING_SNAKE_CASE__ ) == 0
32
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = '''mvp''' UpperCamelCase : Union[str, Any] = ['''past_key_values'''] UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]: _a : Any = vocab_size _a : Any = max_position_embeddings _a : Union[str, Any] = d_model _a : List[str] = encoder_ffn_dim _a : List[Any] = encoder_layers _a : Dict = encoder_attention_heads _a : Tuple = decoder_ffn_dim _a : List[Any] = decoder_layers _a : Optional[Any] = decoder_attention_heads _a : Optional[Any] = dropout _a : str = attention_dropout _a : Dict = activation_dropout _a : Any = activation_function _a : Tuple = init_std _a : Dict = encoder_layerdrop _a : Optional[int] = decoder_layerdrop _a : Optional[Any] = classifier_dropout _a : List[Any] = use_cache _a : Dict = encoder_layers _a : str = scale_embedding # scale factor will be sqrt(d_model) if True _a : int = use_prompt _a : Dict = prompt_length _a : Dict = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ): _a : List[str] = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ """The config can simply be saved and uploaded again to be fixed.""" )
294
0
"""simple docstring""" import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, TextToVideoSDPipeline, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class _UpperCAmelCase ( _A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = TextToVideoSDPipeline SCREAMING_SNAKE_CASE_ : Optional[Any] = TEXT_TO_IMAGE_PARAMS SCREAMING_SNAKE_CASE_ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. SCREAMING_SNAKE_CASE_ : Optional[Any] = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def A ( self : str ) -> int: torch.manual_seed(0 ) lowercase_ : List[str] = UNetaDConditionModel( block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , ) lowercase_ : Union[str, Any] = DDIMScheduler( beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) lowercase_ : str = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , ) torch.manual_seed(0 ) lowercase_ : str = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , ) lowercase_ : Any = CLIPTextModel(A ) lowercase_ : Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) lowercase_ : Tuple = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, } return components def A ( self : int , A : List[str] , A : List[str]=0 ) -> Any: if str(A ).startswith('''mps''' ): lowercase_ : Optional[Any] = torch.manual_seed(A ) else: lowercase_ : List[Any] = torch.Generator(device=A ).manual_seed(A ) lowercase_ : Union[str, Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''pt''', } return inputs def A ( self : str ) -> Optional[Any]: lowercase_ : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator lowercase_ : int = self.get_dummy_components() lowercase_ : List[Any] = TextToVideoSDPipeline(**A ) lowercase_ : Optional[Any] = sd_pipe.to(A ) sd_pipe.set_progress_bar_config(disable=A ) lowercase_ : int = self.get_dummy_inputs(A ) lowercase_ : Optional[Any] = '''np''' lowercase_ : Tuple = sd_pipe(**A ).frames lowercase_ : Any = frames[0][-3:, -3:, -1] assert frames[0].shape == (64, 64, 3) lowercase_ : Tuple = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def A ( self : int ) -> int: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=A , expected_max_diff=3e-3 ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def A ( self : List[str] ) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=A , expected_max_diff=1e-2 ) @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def A ( self : Dict ) -> List[Any]: pass @unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' ) def A ( self : Tuple ) -> Optional[Any]: pass @unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' ) def A ( self : Optional[Any] ) -> Tuple: pass def A ( self : List[str] ) -> Tuple: return super().test_progress_bar() @slow @skip_mps class _UpperCAmelCase ( unittest.TestCase ): def A ( self : Any ) -> Union[str, Any]: lowercase_ : Optional[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy''' ) lowercase_ : List[str] = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) lowercase_ : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) lowercase_ : List[str] = pipe.to('''cuda''' ) lowercase_ : List[str] = '''Spiderman is surfing''' lowercase_ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase_ : int = pipe(A , generator=A , num_inference_steps=25 , output_type='''pt''' ).frames lowercase_ : Union[str, Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2 def A ( self : Any ) -> Dict: lowercase_ : int = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy''' ) lowercase_ : int = TextToVideoSDPipeline.from_pretrained('''damo-vilab/text-to-video-ms-1.7b''' ) lowercase_ : List[Any] = pipe.to('''cuda''' ) lowercase_ : Optional[Any] = '''Spiderman is surfing''' lowercase_ : List[str] = torch.Generator(device='''cpu''' ).manual_seed(0 ) lowercase_ : Union[str, Any] = pipe(A , generator=A , num_inference_steps=2 , output_type='''pt''' ).frames lowercase_ : Optional[Any] = video_frames.cpu().numpy() assert np.abs(expected_video - video ).mean() < 5e-2
33
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _snake_case = logging.getLogger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple: # in NER datasets, the last column is usually reserved for NER label _a : Optional[int] = label_idx def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : Any = mode.value _a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : int = 1 _a : int = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: _a : str = [] _a : str = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 _a : List[str] = [] _a : str = [] else: _a : List[Any] = line.split(""" """ ) words.append(splits[0] ) if len(UpperCAmelCase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) return examples def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]: _a : List[str] = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(UpperCAmelCase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(UpperCAmelCase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : List[Any] = f.read().splitlines() if "O" not in labels: _a : Union[str, Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCamelCase ( snake_case_ ): def __init__( self : Union[str, Any] ) -> List[str]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : Optional[int] = f.read().splitlines() if "O" not in labels: _a : Optional[Any] = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : List[Any] = mode.value _a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : List[str] = 1 _a : Optional[Any] = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(UpperCAmelCase__ ): _a : List[Any] = [] _a : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 return examples def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict: _a : Optional[Any] = 0 for sentence in parse_incr(UpperCAmelCase__ ): _a : List[str] = preds_list[example_id] _a : str = """""" for token in sentence: out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(UpperCAmelCase__ ) example_id += 1 def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
294
0
'''simple docstring''' import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient A =WebClient(token=os.environ['CI_SLACK_BOT_TOKEN']) def snake_case_ (_a : Tuple ): UpperCAmelCase = test_results.split(''' ''' ) UpperCAmelCase = 0 UpperCAmelCase = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. UpperCAmelCase = expressions[-2] if '''=''' in expressions[-1] else expressions[-1] for i, expression in enumerate(_a ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def snake_case_ (_a : Optional[int] ): UpperCAmelCase = {} UpperCAmelCase = None UpperCAmelCase = False for line in failures_short_lines.split('''\n''' ): if re.search(R'''_ \[doctest\]''' , _a ): UpperCAmelCase = True UpperCAmelCase = line.split(''' ''' )[2] elif in_error and not line.split(''' ''' )[0].isdigit(): UpperCAmelCase = line UpperCAmelCase = False return failures class _a : def __init__( self : Dict , lowercase : str , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = title UpperCAmelCase = doc_test_results['''time_spent'''].split(''',''' )[0] UpperCAmelCase = doc_test_results['''success'''] UpperCAmelCase = doc_test_results['''failures'''] UpperCAmelCase = self.n_success + self.n_failures # Failures and success of the modeling tests UpperCAmelCase = doc_test_results @property def A ( self : Any ): '''simple docstring''' UpperCAmelCase = [self._time_spent] UpperCAmelCase = 0 for time in time_spent: UpperCAmelCase = time.split(''':''' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(lowercase ) == 1: UpperCAmelCase = [0, 0, time_parts[0]] UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3_600 + minutes * 60 + seconds UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60 return f"{int(lowercase )}h{int(lowercase )}m{int(lowercase )}s" @property def A ( self : int ): '''simple docstring''' return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def A ( self : Union[str, Any] ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def A ( self : int ): '''simple docstring''' return { "type": "section", "text": { "type": "plain_text", "text": ( f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" f" {self.time}." ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def A ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase = 40 UpperCAmelCase = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(lowercase , lowercase )} UpperCAmelCase = '''''' for category, failures in category_failures.items(): if len(lowercase ) == 0: continue if report != "": report += "\n\n" report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(lowercase ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f"The following examples had failures:\n\n\n{report}\n", }, } @property def A ( self : Any ): '''simple docstring''' UpperCAmelCase = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(lowercase ) @staticmethod def A ( ): '''simple docstring''' UpperCAmelCase = [ { '''type''': '''section''', '''text''': { '''type''': '''plain_text''', '''text''': '''There was an issue running the tests.''', }, '''accessory''': { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True}, '''url''': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(lowercase )} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=lowercase , ) def A ( self : Optional[Any] ): '''simple docstring''' print('''Sending the following payload''' ) print(json.dumps({'''blocks''': json.loads(self.payload )} ) ) UpperCAmelCase = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else '''All tests passed.''' UpperCAmelCase = client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=lowercase , ) def A ( self : Optional[int] , lowercase : Any , lowercase : Any , lowercase : Union[str, Any] , lowercase : List[str] ): '''simple docstring''' UpperCAmelCase = '''''' for key, value in failures.items(): UpperCAmelCase = value[:200] + ''' [Truncated]''' if len(lowercase ) > 250 else value failures_text += f"*{key}*\n_{value}_\n\n" UpperCAmelCase = job_name UpperCAmelCase = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}} if job_link is not None: UpperCAmelCase = { '''type''': '''button''', '''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True}, '''url''': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def A ( self : Optional[int] ): '''simple docstring''' if self.thread_ts is None: raise ValueError('''Can only post reply if a post has been made.''' ) UpperCAmelCase = self.doc_test_results.pop('''job_link''' ) self.doc_test_results.pop('''failures''' ) self.doc_test_results.pop('''success''' ) self.doc_test_results.pop('''time_spent''' ) UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda lowercase : t[0] ) for job, job_result in sorted_dict: if len(job_result['''failures'''] ): UpperCAmelCase = f"*Num failures* :{len(job_result['failed'] )} \n" UpperCAmelCase = job_result['''failures'''] UpperCAmelCase = self.get_reply_blocks(lowercase , lowercase , lowercase , text=lowercase ) print('''Sending the following reply''' ) print(json.dumps({'''blocks''': blocks} ) ) client.chat_postMessage( channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f"Results for {job}" , blocks=lowercase , thread_ts=self.thread_ts['''ts'''] , ) time.sleep(1 ) def snake_case_ (): UpperCAmelCase = os.environ['''GITHUB_RUN_ID'''] UpperCAmelCase = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" UpperCAmelCase = requests.get(_a ).json() UpperCAmelCase = {} try: jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) UpperCAmelCase = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 ) for i in range(_a ): UpperCAmelCase = requests.get(url + F"&page={i + 2}" ).json() jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} ) return jobs except Exception as e: print('''Unknown error, could not fetch links.''' , _a ) return {} def snake_case_ (_a : str ): UpperCAmelCase = {} if os.path.exists(_a ): UpperCAmelCase = os.listdir(_a ) for file in files: try: with open(os.path.join(_a , _a ) , encoding='''utf-8''' ) as f: UpperCAmelCase = f.read() except UnicodeDecodeError as e: raise ValueError(F"Could not open {os.path.join(_a , _a )}." ) from e return _artifact def snake_case_ (): class _a : def __init__( self : Any , lowercase : str ): '''simple docstring''' UpperCAmelCase = name UpperCAmelCase = [] def __str__( self : Tuple ): '''simple docstring''' return self.name def A ( self : List[Any] , lowercase : str ): '''simple docstring''' self.paths.append({'''name''': self.name, '''path''': path} ) UpperCAmelCase = {} UpperCAmelCase = filter(os.path.isdir , os.listdir() ) for directory in directories: UpperCAmelCase = directory if artifact_name not in _available_artifacts: UpperCAmelCase = Artifact(_a ) _available_artifacts[artifact_name].add_path(_a ) return _available_artifacts if __name__ == "__main__": A =get_job_links() A =retrieve_available_artifacts() A =collections.OrderedDict( [ ('*.py', 'API Examples'), ('*.md', 'MD Examples'), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' A ={ v: { 'failed': [], 'failures': {}, } for v in docs.values() } # Link to the GitHub Action job A =github_actions_job_links.get('run_doctests') A =available_artifacts['doc_tests_gpu_test_reports'].paths[0] A =retrieve_artifact(artifact_path['name']) if "stats" in artifact: A , A , A =handle_test_results(artifact['stats']) A =failed A =success A =time_spent[1:-1] + ', ' A =extract_first_line_failure(artifact['failures_short']) for line in artifact["summary_short"].split('\n'): if re.search('FAILED', line): A =line.replace('FAILED ', '') A =line.split()[0].replace('\n', '') if "::" in line: A , A =line.split('::') else: A , A =line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): A =docs[file_regex] doc_test_results[category]["failed"].append(test) A =all_failures[test] if test in all_failures else 'N/A' A =failure break A =Message('🤗 Results of the doc tests.', doc_test_results) message.post() message.post_reply()
34
"""simple docstring""" from __future__ import annotations import time import numpy as np _snake_case = [8, 5, 9, 7] _snake_case = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _snake_case = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class UpperCamelCase : def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None: _a : List[str] = claim_vector _a : List[Any] = allocated_resources_table _a : Union[str, Any] = maximum_claim_table def _lowercase ( self : Tuple ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _lowercase ( self : int ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _lowercase ( self : List[str] ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]: return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()} def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None: _a : List[Any] = self.__need() _a : Optional[int] = self.__allocated_resources_table _a : str = self.__available_resources() _a : Optional[Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: _a : int = False for each_need in need_list: _a : Optional[int] = True for index, need in enumerate(UpperCAmelCase__ ): if need > available_resources[index]: _a : List[Any] = False break if execution: _a : str = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: _a : Any = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(UpperCAmelCase__ ) # update available/freed resources stack _a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def _lowercase ( self : Any ) -> Optional[int]: print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
294
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available __a = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["SpeechEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = ["FlaxSpeechEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel else: import sys __a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
35
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar _snake_case = TypeVar('_T') class UpperCamelCase ( Generic[_T] ): def __init__( self : Optional[int] , UpperCAmelCase__ : Iterable[_T] | None = None ) -> None: _a : list[_T] = list(iterable or [] ) _a : list[_T] = [] def __len__( self : str ) -> int: return len(self._stacka ) + len(self._stacka ) def __repr__( self : List[str] ) -> str: return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : _T ) -> None: self._stacka.append(UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ) -> _T: _a : Any = self._stacka.pop _a : Union[str, Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
294
0
import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( a , unittest.TestCase): lowerCamelCase__ = MgpstrTokenizer lowerCamelCase__ = False lowerCamelCase__ = {} lowerCamelCase__ = False def snake_case__ ( self): '''simple docstring''' super().setUp() # fmt: off _lowerCAmelCase : Union[str, Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"] # fmt: on _lowerCAmelCase : List[str] = dict(zip(__a, range(len(__a)))) _lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(__a) + "\n") def snake_case__ ( self, **__a): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname, **__a) def snake_case__ ( self, __a): '''simple docstring''' _lowerCAmelCase : str = "tester" _lowerCAmelCase : List[str] = "tester" return input_text, output_text @unittest.skip("MGP-STR always lower cases letters.") def snake_case__ ( self): '''simple docstring''' pass def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[str] = self.get_tokenizers(do_lower_case=__a) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): _lowerCAmelCase : str = "[SPECIAL_TOKEN]" tokenizer.add_special_tokens({"cls_token": special_token}) _lowerCAmelCase : List[str] = tokenizer.encode([special_token], add_special_tokens=__a) self.assertEqual(len(__a), 1) _lowerCAmelCase : Optional[Any] = tokenizer.decode(__a, skip_special_tokens=__a) self.assertTrue(special_token not in decoded) def snake_case__ ( self): '''simple docstring''' _lowerCAmelCase : List[Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_input_output_texts(__a) _lowerCAmelCase : int = tokenizer.tokenize(__a) _lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(__a) _lowerCAmelCase : Dict = tokenizer.encode(__a, add_special_tokens=__a) self.assertListEqual(__a, __a) _lowerCAmelCase : Dict = tokenizer.convert_ids_to_tokens(__a) self.assertNotEqual(len(__a), 0) _lowerCAmelCase : List[str] = tokenizer.decode(__a) self.assertIsInstance(__a, __a) self.assertEqual(text_a.replace(" ", ""), __a) @unittest.skip("MGP-STR tokenizer only handles one sequence.") def snake_case__ ( self): '''simple docstring''' pass @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer") def snake_case__ ( self): '''simple docstring''' pass
36
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _snake_case = False class UpperCamelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Optional[Any] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Tuple ) -> List[Any]: _a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Optional[Any] = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase__ ) _a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : Optional[Any] = generator.manual_seed(0 ) _a : str = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : int = """cyberpunk 2077""" _a : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Tuple = torch.manual_seed(0 ) _a : Any = pipe.dual_guided( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _a : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : int = """A painting of a squirrel eating a burger """ _a : Tuple = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.text_to_image( prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images _a : int = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images _a : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
294
0
'''simple docstring''' from cva import destroyAllWindows, imread, imshow, waitKey def _SCREAMING_SNAKE_CASE ( UpperCamelCase ): """simple docstring""" lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(UpperCamelCase ): for j in range(UpperCamelCase ): lowerCAmelCase__ : Union[str, Any] = [255, 255, 255] - img[i][j] return img if __name__ == "__main__": # read original image _lowerCAmelCase = imread('''image_data/lena.jpg''', 1) # convert to its negative _lowerCAmelCase = convert_to_negative(img) # show result image imshow('''negative of original image''', img) waitKey(0) destroyAllWindows()
37
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'vocab.json'} _snake_case = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _snake_case = {'mgp-str': 27} class UpperCamelCase ( snake_case_ ): UpperCamelCase : List[str] = VOCAB_FILES_NAMES UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]="[GO]" , UpperCAmelCase__ : Tuple="[GO]" , UpperCAmelCase__ : Optional[int]="[s]" , UpperCAmelCase__ : int="[GO]" , **UpperCAmelCase__ : Dict ) -> int: super().__init__( unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) with open(UpperCAmelCase__ , encoding="""utf-8""" ) as vocab_handle: _a : int = json.load(UpperCAmelCase__ ) _a : Optional[int] = {v: k for k, v in self.vocab.items()} @property def _lowercase ( self : Dict ) -> Union[str, Any]: return len(self.vocab ) def _lowercase ( self : Union[str, Any] ) -> str: return dict(self.vocab , **self.added_tokens_encoder ) def _lowercase ( self : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]: _a : Tuple = [] for s in text: char_tokens.extend(UpperCAmelCase__ ) return char_tokens def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> Dict: return self.vocab.get(UpperCAmelCase__ , self.vocab.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: return self.decoder.get(UpperCAmelCase__ ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCAmelCase__ ) ) return _a : Tuple = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) + """\n""" ) return (vocab_file,)
294
0
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : int , __magic_name__ : int , __magic_name__ : int ) -> float: """simple docstring""" UpperCamelCase :Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def SCREAMING_SNAKE_CASE_ ( ) -> Any: """simple docstring""" print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
38
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = (IPNDMScheduler,) UpperCamelCase : int = (('''num_inference_steps''', 50),) def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int: _a : Optional[int] = {"""num_train_timesteps""": 1000} config.update(**UpperCAmelCase__ ) return config def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: _a : Optional[int] = dict(self.forward_default_kwargs ) _a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : Union[str, Any] = 0.1 * sample _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Any = dummy_past_residuals[:] if time_step is None: _a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ ) new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Optional[Any] = dummy_past_residuals[:] _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : Tuple ) -> List[str]: pass def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: _a : Optional[Any] = dict(self.forward_default_kwargs ) _a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : List[Any] = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Union[str, Any] = self.get_scheduler_config() _a : Optional[Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals (must be after setting timesteps) _a : Any = dummy_past_residuals[:] if time_step is None: _a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residual (must be after setting timesteps) _a : Optional[Any] = dummy_past_residuals[:] _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]: _a : Optional[int] = self.scheduler_classes[0] _a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) _a : int = 10 _a : List[Any] = self.dummy_model() _a : str = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _a : str = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample return sample def _lowercase ( self : int ) -> str: _a : Dict = dict(self.forward_default_kwargs ) _a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config() _a : Tuple = scheduler_class(**UpperCAmelCase__ ) _a : Tuple = self.dummy_sample _a : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ): scheduler.set_timesteps(UpperCAmelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ): _a : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] _a : Optional[Any] = dummy_past_residuals[:] _a : Optional[Any] = scheduler.timesteps[5] _a : str = scheduler.timesteps[6] _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) _a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowercase ( self : List[str] ) -> List[str]: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> List[str]: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : int ) -> List[Any]: _a : str = self.full_loop() _a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_mean.item() - 2540529 ) < 10
294
0
from __future__ import annotations def __A ( __lowerCAmelCase , __lowerCAmelCase )-> list[tuple[int, int]]: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = position _UpperCAmelCase = [ (y + 1, x + 2), (y - 1, x + 2), (y + 1, x - 2), (y - 1, x - 2), (y + 2, x + 1), (y + 2, x - 1), (y - 2, x + 1), (y - 2, x - 1), ] _UpperCAmelCase = [] for position in positions: _UpperCAmelCase , _UpperCAmelCase = position if 0 <= y_test < n and 0 <= x_test < n: permissible_positions.append(__lowerCAmelCase ) return permissible_positions def __A ( __lowerCAmelCase )-> bool: """simple docstring""" return not any(elem == 0 for row in board for elem in row ) def __A ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )-> bool: """simple docstring""" if is_complete(__lowerCAmelCase ): return True for position in get_valid_pos(__lowerCAmelCase , len(__lowerCAmelCase ) ): _UpperCAmelCase , _UpperCAmelCase = position if board[y][x] == 0: _UpperCAmelCase = curr + 1 if open_knight_tour_helper(__lowerCAmelCase , __lowerCAmelCase , curr + 1 ): return True _UpperCAmelCase = 0 return False def __A ( __lowerCAmelCase )-> list[list[int]]: """simple docstring""" _UpperCAmelCase = [[0 for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )] for i in range(__lowerCAmelCase ): for j in range(__lowerCAmelCase ): _UpperCAmelCase = 1 if open_knight_tour_helper(__lowerCAmelCase , (i, j) , 1 ): return board _UpperCAmelCase = 0 _UpperCAmelCase = F"""Open Kight Tour cannot be performed on a board of size {n}""" raise ValueError(__lowerCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod()
39
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'spiece.model'} _snake_case = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class UpperCamelCase ( snake_case_ ): def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None: _a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token _a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) _a : Optional[Any] = 3 _a : Tuple = do_lower_case _a : Tuple = remove_space _a : Tuple = keep_accents _a : Tuple = vocab_file _a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """ """See https://pypi.org/project/jieba/ for installation.""" ) _a : int = jieba _a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _lowercase ( self : Optional[Any] ) -> Any: return len(self.sp_model ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> List[str]: _a : Tuple = self.__dict__.copy() _a : Tuple = None return state def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict: _a : Tuple = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple = {} _a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict: if self.remove_space: _a : Optional[int] = """ """.join(inputs.strip().split() ) else: _a : List[Any] = inputs _a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ ) _a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] ) if self.do_lower_case: _a : Union[str, Any] = outputs.lower() return outputs def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: _a : str = self.preprocess_text(UpperCAmelCase__ ) _a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) _a : Union[str, Any] = [] for piece in pieces: if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _a : Dict = cur_pieces[1:] else: _a : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCAmelCase__ ) else: new_pieces.append(UpperCAmelCase__ ) return new_pieces def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int: return self.sp_model.PieceToId(UpperCAmelCase__ ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any: return self.sp_model.IdToPiece(UpperCAmelCase__ ) def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict: _a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip() return out_string def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Optional[Any] = [self.sep_token_id] _a : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] return ([0] * len(UpperCAmelCase__ )) + [1, 1] def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Any = [self.sep_token_id] _a : Optional[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : Union[str, Any] = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , """wb""" ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,) def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]: _a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) _a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
294
0
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin __lowercase = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""") @require_sentencepiece @require_tokenizers class _A ( _a ,unittest.TestCase ): """simple docstring""" UpperCAmelCase : str = SpeechTaTokenizer UpperCAmelCase : List[Any] = False UpperCAmelCase : Any = True def __snake_case ( self : Optional[Any]): super().setUp() # We have a SentencePiece fixture for testing a : Optional[int] = SpeechTaTokenizer(__UpperCAmelCase) a : List[str] = AddedToken("<mask>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase) a : List[Any] = mask_token tokenizer.add_special_tokens({"mask_token": mask_token}) tokenizer.add_tokens(["<ctc_blank>"]) tokenizer.save_pretrained(self.tmpdirname) def __snake_case ( self : Optional[Any] , __UpperCAmelCase : str): a : Dict = "this is a test" a : Tuple = "this is a test" return input_text, output_text def __snake_case ( self : str , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : str=20 , __UpperCAmelCase : Tuple=5): a , a : List[Any] = self.get_input_output_texts(__UpperCAmelCase) a : Optional[Any] = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase) a : List[str] = tokenizer.decode(__UpperCAmelCase , clean_up_tokenization_spaces=__UpperCAmelCase) return text, ids def __snake_case ( self : Optional[Any]): a : Dict = "<pad>" a : str = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase) , __UpperCAmelCase) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase) , __UpperCAmelCase) def __snake_case ( self : List[Any]): a : Tuple = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , "<s>") self.assertEqual(vocab_keys[1] , "<pad>") self.assertEqual(vocab_keys[-4] , "œ") self.assertEqual(vocab_keys[-2] , "<mask>") self.assertEqual(vocab_keys[-1] , "<ctc_blank>") self.assertEqual(len(__UpperCAmelCase) , 81) def __snake_case ( self : Optional[int]): self.assertEqual(self.get_tokenizer().vocab_size , 79) def __snake_case ( self : int): a : Any = self.get_tokenizers(do_lower_case=__UpperCAmelCase) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}'''): a : int = tokenizer.vocab_size a : Optional[Any] = len(__UpperCAmelCase) self.assertNotEqual(__UpperCAmelCase , 0) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) a : List[str] = ["aaaaa bbbbbb", "cccccccccdddddddd"] a : Tuple = tokenizer.add_tokens(__UpperCAmelCase) a : Optional[int] = tokenizer.vocab_size a : Any = len(__UpperCAmelCase) self.assertNotEqual(__UpperCAmelCase , 0) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) self.assertEqual(__UpperCAmelCase , len(__UpperCAmelCase)) self.assertEqual(__UpperCAmelCase , all_size + len(__UpperCAmelCase)) a : Union[str, Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=__UpperCAmelCase) self.assertGreaterEqual(len(__UpperCAmelCase) , 4) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) a : List[Any] = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"} a : Any = tokenizer.add_special_tokens(__UpperCAmelCase) a : List[str] = tokenizer.vocab_size a : List[str] = len(__UpperCAmelCase) self.assertNotEqual(__UpperCAmelCase , 0) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase) self.assertEqual(__UpperCAmelCase , len(__UpperCAmelCase)) self.assertEqual(__UpperCAmelCase , all_size_a + len(__UpperCAmelCase)) a : Union[str, Any] = tokenizer.encode( ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=__UpperCAmelCase) self.assertGreaterEqual(len(__UpperCAmelCase) , 6) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1) self.assertGreater(tokens[0] , tokens[1]) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1) self.assertGreater(tokens[-3] , tokens[-4]) self.assertEqual(tokens[0] , tokenizer.eos_token_id) self.assertEqual(tokens[-3] , tokenizer.pad_token_id) def __snake_case ( self : Dict): pass def __snake_case ( self : str): pass def __snake_case ( self : Union[str, Any]): a : Union[str, Any] = self.get_tokenizer() a : List[str] = tokenizer.tokenize("This is a test") # fmt: off self.assertListEqual(__UpperCAmelCase , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"]) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , ) a : Any = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( __UpperCAmelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."]) a : Tuple = tokenizer.convert_tokens_to_ids(__UpperCAmelCase) # fmt: off self.assertListEqual(__UpperCAmelCase , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26]) # fmt: on a : Optional[int] = tokenizer.convert_ids_to_tokens(__UpperCAmelCase) self.assertListEqual( __UpperCAmelCase , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."]) @slow def __snake_case ( self : Any): # Use custom sequence because this tokenizer does not handle numbers. a : Union[str, Any] = [ "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides " "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural " "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained " "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.", "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly " "conditioning on both left and right context in all layers.", "The quick brown fox jumps over the lazy dog.", ] # fmt: off a : str = { "input_ids": [ [4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2], [4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], "attention_mask": [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=__UpperCAmelCase , )
40
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase ( nn.Module ): def __init__( self : Union[str, Any] ) -> int: super().__init__() _a : Optional[Any] = nn.Linear(3 , 4 ) _a : Tuple = nn.BatchNormad(4 ) _a : Dict = nn.Linear(4 , 5 ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int: return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) ) class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]: return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]: return output + 1 class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Dict ) -> str: _a : List[Any] = ModelForTest() _a : str = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(test_model._hf_hook , UpperCAmelCase__ ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Dict = ModelForTest() _a : Dict = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ ) self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Dict ) -> int: _a : str = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Optional[Any] = test_model(x + 1 ) _a : str = test_model(x + 2 ) _a : Union[str, Any] = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : int = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : str = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : int = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) def _lowercase ( self : Tuple ) -> int: _a : Tuple = ModelForTest() _a : Union[str, Any] = torch.randn(2 , 3 ) _a : Optional[int] = test_model(UpperCAmelCase__ ) _a : int = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : List[Any] = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Dict = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : Any = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Optional[int] = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 ) def _lowercase ( self : Dict ) -> Optional[Any]: _a : Any = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Dict = test_model(UpperCAmelCase__ ) _a : Any = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _a : Any = True _a : Union[str, Any] = test_model(UpperCAmelCase__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _lowercase ( self : Optional[Any] ) -> str: _a : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _a : Optional[int] = torch.randn(2 , 3 ) _a : Any = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) ) _a : str = torch.randn(2 , 3 ).to(0 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(0 ) ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : int = torch.randn(2 , 3 ) _a : str = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload _a : List[str] = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Tuple = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Tuple ) -> List[str]: _a : str = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : List[Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : List[str] = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Dict ) -> str: _a : Optional[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : str = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Union[str, Any] = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Any = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
294
0
'''simple docstring''' # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny model through reduction of a normal pre-trained model, but keeping the # full vocab, merges file, and thus also resulting in a larger model due to a large vocab size. # This gives ~3MB in total for all files. # # If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated # # # It will be used then as "stas/tiny-wmt19-en-de" # Build from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration _A : str ='''facebook/wmt19-en-de''' _A : List[Any] =FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. from the master model _A : Any =FSMTConfig.from_pretrained(mname) config.update( dict( d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) ) _A : List[Any] =FSMTForConditionalGeneration(config) print(F'num of params {tiny_model.num_parameters()}') # Test _A : Tuple =tokenizer(['''Making tiny model'''], return_tensors='''pt''') _A : Optional[int] =tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save _A : Any ='''tiny-wmt19-en-de''' tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F'Generated {mname_tiny}') # Upload # transformers-cli upload tiny-wmt19-en-de
41
"""simple docstring""" from __future__ import annotations def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' print(F"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(UpperCamelCase__ ): print(F"""{i}\t\t{d}""" ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for j in range(UpperCamelCase__ ): _a , _a , _a : List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Dict = [float("""inf""" )] * vertex_count _a : Any = 0.0 for _ in range(vertex_count - 1 ): for j in range(UpperCamelCase__ ): _a , _a , _a : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _a : Any = distance[u] + w _a : Union[str, Any] = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() _snake_case = int(input('Enter number of vertices: ').strip()) _snake_case = int(input('Enter number of edges: ').strip()) _snake_case = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) _snake_case , _snake_case , _snake_case = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) _snake_case = {'src': src, 'dst': dest, 'weight': weight} _snake_case = int(input('\nEnter shortest path source:').strip()) _snake_case = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
294
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowercase : Optional[Any] = { "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Optional[Any] = [ "RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ResNetForImageClassification", "ResNetModel", "ResNetPreTrainedModel", "ResNetBackbone", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ "TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFResNetForImageClassification", "TFResNetModel", "TFResNetPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase : Any = [ "FlaxResNetForImageClassification", "FlaxResNetModel", "FlaxResNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_resnet import ( RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, ResNetBackbone, ResNetForImageClassification, ResNetModel, ResNetPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_resnet import ( TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFResNetForImageClassification, TFResNetModel, TFResNetPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel else: import sys lowercase : str = _LazyModule(__name__, globals()["__file__"], _import_structure)
42
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowercase = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase_ ( UpperCAmelCase_ ): '''simple docstring''' a__ : int = ["""pixel_values"""] def __init__( self , __lowercase = True , __lowercase = None , __lowercase = PILImageResampling.BICUBIC , __lowercase = True , __lowercase = None , __lowercase = True , __lowercase = 1 / 255 , __lowercase = True , __lowercase = None , __lowercase = None , __lowercase = True , **__lowercase , ) -> None: super().__init__(**__lowercase) __UpperCamelCase :List[Any] = size if size is not None else {'''shortest_edge''': 224} __UpperCamelCase :List[str] = get_size_dict(__lowercase , default_to_square=__lowercase) __UpperCamelCase :Optional[Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} __UpperCamelCase :str = get_size_dict(__lowercase , default_to_square=__lowercase , param_name='''crop_size''') __UpperCamelCase :List[str] = do_resize __UpperCamelCase :Union[str, Any] = size __UpperCamelCase :Dict = resample __UpperCamelCase :Optional[Any] = do_center_crop __UpperCamelCase :Dict = crop_size __UpperCamelCase :List[str] = do_rescale __UpperCamelCase :Any = rescale_factor __UpperCamelCase :Any = do_normalize __UpperCamelCase :int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCamelCase :Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCamelCase :Any = do_convert_rgb def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = PILImageResampling.BICUBIC , __lowercase = None , **__lowercase , ) -> np.ndarray: __UpperCamelCase :List[Any] = get_size_dict(__lowercase , default_to_square=__lowercase) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""") __UpperCamelCase :List[Any] = get_resize_output_image_size(__lowercase , size=size['''shortest_edge'''] , default_to_square=__lowercase) return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase) def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray: __UpperCamelCase :Optional[int] = get_size_dict(__lowercase) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""") return center_crop(__lowercase , size=(size['''height'''], size['''width''']) , data_format=__lowercase , **__lowercase) def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> List[str]: return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase) def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase = None , **__lowercase , ) -> np.ndarray: return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase) def UpperCamelCase__ ( self , __lowercase , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = None , __lowercase = ChannelDimension.FIRST , **__lowercase , ) -> PIL.Image.Image: __UpperCamelCase :int = do_resize if do_resize is not None else self.do_resize __UpperCamelCase :Dict = size if size is not None else self.size __UpperCamelCase :Union[str, Any] = get_size_dict(__lowercase , param_name='''size''' , default_to_square=__lowercase) __UpperCamelCase :Optional[int] = resample if resample is not None else self.resample __UpperCamelCase :Dict = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCamelCase :List[Any] = crop_size if crop_size is not None else self.crop_size __UpperCamelCase :Optional[Any] = get_size_dict(__lowercase , param_name='''crop_size''' , default_to_square=__lowercase) __UpperCamelCase :Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCamelCase :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCamelCase :Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCamelCase :Union[str, Any] = image_mean if image_mean is not None else self.image_mean __UpperCamelCase :List[Any] = image_std if image_std is not None else self.image_std __UpperCamelCase :Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCamelCase :List[str] = make_list_of_images(__lowercase) if not valid_images(__lowercase): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''') if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''') if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''') if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''') if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''') # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCamelCase :Optional[Any] = [convert_to_rgb(__lowercase) for image in images] # All transformations expect numpy arrays. __UpperCamelCase :List[Any] = [to_numpy_array(__lowercase) for image in images] if do_resize: __UpperCamelCase :Optional[Any] = [self.resize(image=__lowercase , size=__lowercase , resample=__lowercase) for image in images] if do_center_crop: __UpperCamelCase :List[Any] = [self.center_crop(image=__lowercase , size=__lowercase) for image in images] if do_rescale: __UpperCamelCase :List[Any] = [self.rescale(image=__lowercase , scale=__lowercase) for image in images] if do_normalize: __UpperCamelCase :Optional[Any] = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase) for image in images] __UpperCamelCase :Dict = [to_channel_dimension_format(__lowercase , __lowercase) for image in images] __UpperCamelCase :Dict = {'''pixel_values''': images} return BatchFeature(data=__lowercase , tensor_type=__lowercase)
43
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): '''simple docstring''' _a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: _a : Tuple = 1 - (matter_density + radiation_density + dark_energy) _a : int = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _a : List[str] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _snake_case = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
294
0
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _a : Tuple = logging.get_logger(__name__) _a : Optional[int] = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class __A ( SCREAMING_SNAKE_CASE_ ): _UpperCamelCase : Any = "gptj" _UpperCamelCase : int = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self , a__=50400 , a__=2048 , a__=4096 , a__=28 , a__=16 , a__=64 , a__=None , a__="gelu_new" , a__=0.0 , a__=0.0 , a__=0.0 , a__=1e-5 , a__=0.0_2 , a__=True , a__=50256 , a__=50256 , a__=False , **a__ , ): _lowerCAmelCase : Union[str, Any] = vocab_size _lowerCAmelCase : Dict = n_positions _lowerCAmelCase : Optional[int] = n_embd _lowerCAmelCase : str = n_layer _lowerCAmelCase : Optional[Any] = n_head _lowerCAmelCase : int = n_inner _lowerCAmelCase : int = rotary_dim _lowerCAmelCase : str = activation_function _lowerCAmelCase : Dict = resid_pdrop _lowerCAmelCase : str = embd_pdrop _lowerCAmelCase : int = attn_pdrop _lowerCAmelCase : Optional[int] = layer_norm_epsilon _lowerCAmelCase : Tuple = initializer_range _lowerCAmelCase : Dict = use_cache _lowerCAmelCase : Dict = bos_token_id _lowerCAmelCase : str = eos_token_id super().__init__( bos_token_id=a__ , eos_token_id=a__ , tie_word_embeddings=a__ , **a__ ) class __A ( SCREAMING_SNAKE_CASE_ ): def __init__( self , a__ , a__ = "default" , a__ = None , a__ = False , ): super().__init__(a__ , task=a__ , patching_specs=a__ , use_past=a__ ) if not getattr(self._config , """pad_token_id""" , a__ ): # TODO: how to do that better? _lowerCAmelCase : List[Any] = 0 @property def __A ( self ): _lowerCAmelCase : Optional[Any] = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(a__ , direction="""inputs""" ) _lowerCAmelCase : Any = {0: """batch""", 1: """past_sequence + sequence"""} else: _lowerCAmelCase : Any = {0: """batch""", 1: """sequence"""} return common_inputs @property def __A ( self ): return self._config.n_layer @property def __A ( self ): return self._config.n_head def __A ( self , a__ , a__ = -1 , a__ = -1 , a__ = False , a__ = None , ): _lowerCAmelCase : str = super(a__ , self ).generate_dummy_inputs( a__ , batch_size=a__ , seq_length=a__ , is_pair=a__ , framework=a__ ) # We need to order the input in the way they appears in the forward() _lowerCAmelCase : Any = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch _lowerCAmelCase , _lowerCAmelCase : Dict = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values _lowerCAmelCase : Dict = seqlen + 2 _lowerCAmelCase : Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _lowerCAmelCase : Optional[Any] = [ (torch.zeros(a__ ), torch.zeros(a__ )) for _ in range(self.num_layers ) ] _lowerCAmelCase : List[str] = common_inputs["""attention_mask"""] if self.use_past: _lowerCAmelCase : Tuple = ordered_inputs["""attention_mask"""].dtype _lowerCAmelCase : List[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(a__ , a__ , dtype=a__ )] , dim=1 ) return ordered_inputs @property def __A ( self ): return 13
44
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : List[Any] ) -> Dict: _a : Optional[int] = tempfile.mkdtemp() _a : Optional[Any] = SamImageProcessor() _a : int = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : str ) -> int: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Tuple ) -> Dict: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Dict ) -> Dict: _a : List[Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) -> Tuple: _a : Optional[Any] = self.get_image_processor() _a : int = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Union[str, Any] = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def _lowercase ( self : Optional[Any] ) -> Optional[Any]: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = [torch.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : Optional[int] = [[683, 1024]] _a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : int = processor.post_process_masks( UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : Optional[Any] = [np.ones((1, 3, 5, 5) )] _a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : List[str] = [[1, 0], [0, 1]] with self.assertRaises(UpperCAmelCase__ ): _a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) @require_vision @require_tf class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Any ) -> List[str]: _a : List[str] = tempfile.mkdtemp() _a : Any = SamImageProcessor() _a : Union[str, Any] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Dict ) -> List[str]: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: _a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> str: _a : Union[str, Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : int = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def _lowercase ( self : Optional[Any] ) -> int: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Any = [tf.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : str = [[683, 1024]] _a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Union[str, Any] = processor.post_process_masks( UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : List[Any] = [np.ones((1, 3, 5, 5) )] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Dict = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): _a : List[Any] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : str ) -> Optional[Any]: _a : Optional[Any] = tempfile.mkdtemp() _a : Dict = SamImageProcessor() _a : List[str] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Tuple ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : str ) -> int: _a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _lowercase ( self : int ) -> List[Any]: _a : Optional[Any] = self.get_image_processor() _a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) _a : str = [tf.convert_to_tensor(UpperCAmelCase__ )] _a : Optional[int] = [torch.tensor(UpperCAmelCase__ )] _a : Union[str, Any] = [[1764, 2646]] _a : List[str] = [[683, 1024]] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) _a : List[str] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _lowercase ( self : str ) -> Optional[Any]: _a : List[Any] = self.get_image_processor() _a : Any = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Dict = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() _a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
294
0
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch lowercase_ = logging.get_logger(__name__) class __lowerCAmelCase : '''simple docstring''' def __init__( self , _a = None , _a = None , _a=None , _a=None ): if not conversation_id: __a = uuid.uuida() if past_user_inputs is None: __a = [] if generated_responses is None: __a = [] __a = conversation_id __a = past_user_inputs __a = generated_responses __a = text def __eq__( self , _a ): if not isinstance(_a , _a ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def __UpperCAmelCase ( self , _a , _a = False ): if self.new_user_input: if overwrite: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' f'''with: "{text}".''' ) __a = text else: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: __a = text def __UpperCAmelCase ( self ): if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) __a = None def __UpperCAmelCase ( self , _a ): self.generated_responses.append(_a ) def __UpperCAmelCase ( self ): for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): __a = f'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): __a = '''user''' if is_user else '''bot''' output += f'''{name} >> {text} \n''' return output @add_end_docstrings( __SCREAMING_SNAKE_CASE , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' def __init__( self , *_a , **_a ): super().__init__(*_a , **_a ) if self.tokenizer.pad_token_id is None: __a = self.tokenizer.eos_token def __UpperCAmelCase ( self , _a=None , _a=None , _a=None , **_a ): __a = {} __a = {} __a = {} if min_length_for_response is not None: __a = min_length_for_response if minimum_tokens is not None: __a = minimum_tokens if "max_length" in generate_kwargs: __a = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: __a = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(_a ) return preprocess_params, forward_params, postprocess_params def __call__( self , _a , _a=0 , **_a ): __a = super().__call__(_a , num_workers=_a , **_a ) if isinstance(_a , _a ) and len(_a ) == 1: return outputs[0] return outputs def __UpperCAmelCase ( self , _a , _a=32 ): if not isinstance(_a , _a ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): __a = self.tokenizer._build_conversation_input_ids(_a ) else: # If the tokenizer cannot handle conversations, we default to only the old version __a = self._legacy_parse_and_tokenize(_a ) if self.framework == "pt": __a = torch.LongTensor([input_ids] ) elif self.framework == "tf": __a = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def __UpperCAmelCase ( self , _a , _a=10 , **_a ): __a = generate_kwargs.get('''max_length''' , self.model.config.max_length ) __a = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(f'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) __a = max_length - minimum_tokens __a = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: __a = model_inputs['''attention_mask'''][:, -trim:] __a = model_inputs.pop('''conversation''' ) __a = max_length __a = self.model.generate(**_a , **_a ) if self.model.config.is_encoder_decoder: __a = 1 else: __a = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def __UpperCAmelCase ( self , _a , _a=True ): __a = model_outputs['''output_ids'''] __a = self.tokenizer.decode( output_ids[0] , skip_special_tokens=_a , clean_up_tokenization_spaces=_a , ) __a = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(_a ) return conversation def __UpperCAmelCase ( self , _a ): __a = self.tokenizer.eos_token_id __a = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(_a , add_special_tokens=_a ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(_a , add_special_tokens=_a ) ) if len(_a ) > self.tokenizer.model_max_length: __a = input_ids[-self.tokenizer.model_max_length :] return input_ids
45
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _snake_case = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } _snake_case = { '169M': 768, '430M': 1024, '1B5': 2048, '3B': 2560, '7B': 4096, '14B': 5120, } def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : int = list(state_dict.keys() ) for name in state_dict_keys: _a : str = state_dict.pop(UpperCamelCase__ ) # emb -> embedding if name.startswith("""emb.""" ): _a : Dict = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): _a : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention _a : Any = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCamelCase__ ) # ffn -> feed_forward _a : int = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCamelCase__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): _a : List[str] = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): _a : Tuple = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): _a : Dict = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": _a : Optional[int] = """rwkv.""" + name _a : Any = weight return state_dict def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=None ): '''simple docstring''' # 1. If possible, build the tokenizer. if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) _a : Tuple = 5_0_2_7_7 _a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: _a : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase__ ) _a : int = len(UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) # 2. Build the config _a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: _a : Tuple = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) _a : List[Any] = RwkvConfig( vocab_size=UpperCamelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(UpperCamelCase__ ) # 3. Download model file then convert state_dict _a : str = hf_hub_download(UpperCamelCase__ , UpperCamelCase__ ) _a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : List[str] = convert_state_dict(UpperCamelCase__ ) # 4. Split in shards and save _a , _a : List[str] = shard_checkpoint(UpperCamelCase__ ) for shard_file, shard in shards.items(): torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if index is not None: _a : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) # Save the index as well with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f: _a : Dict = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n""" f.write(UpperCamelCase__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" ) _a : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: _a : Any = torch.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) _a : Dict = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ , max_shard_size="""2GB""" ) tokenizer.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) _snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
294
0
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Base model mapping ("albert", "FlaxAlbertModel"), ("bart", "FlaxBartModel"), ("beit", "FlaxBeitModel"), ("bert", "FlaxBertModel"), ("big_bird", "FlaxBigBirdModel"), ("blenderbot", "FlaxBlenderbotModel"), ("blenderbot-small", "FlaxBlenderbotSmallModel"), ("clip", "FlaxCLIPModel"), ("distilbert", "FlaxDistilBertModel"), ("electra", "FlaxElectraModel"), ("gpt-sw3", "FlaxGPT2Model"), ("gpt2", "FlaxGPT2Model"), ("gpt_neo", "FlaxGPTNeoModel"), ("gptj", "FlaxGPTJModel"), ("longt5", "FlaxLongT5Model"), ("marian", "FlaxMarianModel"), ("mbart", "FlaxMBartModel"), ("mt5", "FlaxMT5Model"), ("opt", "FlaxOPTModel"), ("pegasus", "FlaxPegasusModel"), ("regnet", "FlaxRegNetModel"), ("resnet", "FlaxResNetModel"), ("roberta", "FlaxRobertaModel"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"), ("roformer", "FlaxRoFormerModel"), ("t5", "FlaxT5Model"), ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"), ("vit", "FlaxViTModel"), ("wav2vec2", "FlaxWav2Vec2Model"), ("whisper", "FlaxWhisperModel"), ("xglm", "FlaxXGLMModel"), ("xlm-roberta", "FlaxXLMRobertaModel"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for pre-training mapping ("albert", "FlaxAlbertForPreTraining"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForPreTraining"), ("big_bird", "FlaxBigBirdForPreTraining"), ("electra", "FlaxElectraForPreTraining"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("t5", "FlaxT5ForConditionalGeneration"), ("wav2vec2", "FlaxWav2Vec2ForPreTraining"), ("whisper", "FlaxWhisperForConditionalGeneration"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Masked LM mapping ("albert", "FlaxAlbertForMaskedLM"), ("bart", "FlaxBartForConditionalGeneration"), ("bert", "FlaxBertForMaskedLM"), ("big_bird", "FlaxBigBirdForMaskedLM"), ("distilbert", "FlaxDistilBertForMaskedLM"), ("electra", "FlaxElectraForMaskedLM"), ("mbart", "FlaxMBartForConditionalGeneration"), ("roberta", "FlaxRobertaForMaskedLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"), ("roformer", "FlaxRoFormerForMaskedLM"), ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("bart", "FlaxBartForConditionalGeneration"), ("blenderbot", "FlaxBlenderbotForConditionalGeneration"), ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"), ("encoder-decoder", "FlaxEncoderDecoderModel"), ("longt5", "FlaxLongT5ForConditionalGeneration"), ("marian", "FlaxMarianMTModel"), ("mbart", "FlaxMBartForConditionalGeneration"), ("mt5", "FlaxMT5ForConditionalGeneration"), ("pegasus", "FlaxPegasusForConditionalGeneration"), ("t5", "FlaxT5ForConditionalGeneration"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Image-classsification ("beit", "FlaxBeitForImageClassification"), ("regnet", "FlaxRegNetForImageClassification"), ("resnet", "FlaxResNetForImageClassification"), ("vit", "FlaxViTForImageClassification"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Causal LM mapping ("bart", "FlaxBartForCausalLM"), ("bert", "FlaxBertForCausalLM"), ("big_bird", "FlaxBigBirdForCausalLM"), ("electra", "FlaxElectraForCausalLM"), ("gpt-sw3", "FlaxGPT2LMHeadModel"), ("gpt2", "FlaxGPT2LMHeadModel"), ("gpt_neo", "FlaxGPTNeoForCausalLM"), ("gptj", "FlaxGPTJForCausalLM"), ("opt", "FlaxOPTForCausalLM"), ("roberta", "FlaxRobertaForCausalLM"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"), ("xglm", "FlaxXGLMForCausalLM"), ("xlm-roberta", "FlaxXLMRobertaForCausalLM"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Sequence Classification mapping ("albert", "FlaxAlbertForSequenceClassification"), ("bart", "FlaxBartForSequenceClassification"), ("bert", "FlaxBertForSequenceClassification"), ("big_bird", "FlaxBigBirdForSequenceClassification"), ("distilbert", "FlaxDistilBertForSequenceClassification"), ("electra", "FlaxElectraForSequenceClassification"), ("mbart", "FlaxMBartForSequenceClassification"), ("roberta", "FlaxRobertaForSequenceClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"), ("roformer", "FlaxRoFormerForSequenceClassification"), ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Question Answering mapping ("albert", "FlaxAlbertForQuestionAnswering"), ("bart", "FlaxBartForQuestionAnswering"), ("bert", "FlaxBertForQuestionAnswering"), ("big_bird", "FlaxBigBirdForQuestionAnswering"), ("distilbert", "FlaxDistilBertForQuestionAnswering"), ("electra", "FlaxElectraForQuestionAnswering"), ("mbart", "FlaxMBartForQuestionAnswering"), ("roberta", "FlaxRobertaForQuestionAnswering"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"), ("roformer", "FlaxRoFormerForQuestionAnswering"), ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Token Classification mapping ("albert", "FlaxAlbertForTokenClassification"), ("bert", "FlaxBertForTokenClassification"), ("big_bird", "FlaxBigBirdForTokenClassification"), ("distilbert", "FlaxDistilBertForTokenClassification"), ("electra", "FlaxElectraForTokenClassification"), ("roberta", "FlaxRobertaForTokenClassification"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"), ("roformer", "FlaxRoFormerForTokenClassification"), ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ # Model for Multiple Choice mapping ("albert", "FlaxAlbertForMultipleChoice"), ("bert", "FlaxBertForMultipleChoice"), ("big_bird", "FlaxBigBirdForMultipleChoice"), ("distilbert", "FlaxDistilBertForMultipleChoice"), ("electra", "FlaxElectraForMultipleChoice"), ("roberta", "FlaxRobertaForMultipleChoice"), ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"), ("roformer", "FlaxRoFormerForMultipleChoice"), ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("bert", "FlaxBertForNextSentencePrediction"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"), ("whisper", "FlaxWhisperForConditionalGeneration"), ] ) SCREAMING_SNAKE_CASE__ = OrderedDict( [ ("whisper", "FlaxWhisperForAudioClassification"), ] ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) SCREAMING_SNAKE_CASE__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModel) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_PRETRAINING_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="sequence classification" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="token classification" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="image classification" ) class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="vision-to-text modeling") class lowercase ( _BaseAutoModelClass ): _SCREAMING_SNAKE_CASE = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING SCREAMING_SNAKE_CASE__ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="sequence-to-sequence speech-to-text modeling" )
46
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> List[str]: _a : Any = """laion/clap-htsat-unfused""" _a : Union[str, Any] = tempfile.mkdtemp() def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict: return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : List[str] ) -> Optional[int]: _a : List[str] = self.get_tokenizer() _a : Any = self.get_feature_extractor() _a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) _a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> Optional[int]: _a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> Optional[Any]: _a : Optional[int] = self.get_feature_extractor() _a : Tuple = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = floats_list((3, 1000) ) _a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Tuple ) -> Optional[int]: _a : List[str] = self.get_feature_extractor() _a : Any = self.get_tokenizer() _a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Optional[int] = """This is a test string""" _a : Tuple = processor(text=UpperCAmelCase__ ) _a : int = tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : List[Any] ) -> Any: _a : str = self.get_feature_extractor() _a : List[str] = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : Dict = processor.batch_decode(UpperCAmelCase__ ) _a : Any = tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> List[str]: _a : str = self.get_feature_extractor() _a : Optional[Any] = self.get_tokenizer() _a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
294
0
'''simple docstring''' import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class A__ ( A__ ): def A ( self : str ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_a , 'width_multiplier' ) ) class A__ : def __init__( self : Any , _a : str , _a : Dict=13 , _a : Any=64 , _a : Any=2 , _a : Dict=3 , _a : List[Any]="swish" , _a : Any=3 , _a : str=32 , _a : str=0.1 , _a : Optional[int]=0.02 , _a : Dict=True , _a : Union[str, Any]=True , _a : List[str]=10 , _a : List[Any]=None , _a : List[str]=0.25 , _a : List[str]=0.0 , _a : int=0.0 , ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =parent _SCREAMING_SNAKE_CASE =batch_size _SCREAMING_SNAKE_CASE =image_size _SCREAMING_SNAKE_CASE =patch_size _SCREAMING_SNAKE_CASE =num_channels _SCREAMING_SNAKE_CASE =make_divisible(512 * width_multiplier , divisor=8 ) _SCREAMING_SNAKE_CASE =hidden_act _SCREAMING_SNAKE_CASE =conv_kernel_size _SCREAMING_SNAKE_CASE =output_stride _SCREAMING_SNAKE_CASE =classifier_dropout_prob _SCREAMING_SNAKE_CASE =use_labels _SCREAMING_SNAKE_CASE =is_training _SCREAMING_SNAKE_CASE =num_labels _SCREAMING_SNAKE_CASE =initializer_range _SCREAMING_SNAKE_CASE =scope _SCREAMING_SNAKE_CASE =width_multiplier _SCREAMING_SNAKE_CASE =ffn_dropout _SCREAMING_SNAKE_CASE =attn_dropout def A ( self : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _SCREAMING_SNAKE_CASE =None _SCREAMING_SNAKE_CASE =None if self.use_labels: _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels ) _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) _SCREAMING_SNAKE_CASE =self.get_config() return config, pixel_values, labels, pixel_labels def A ( self : Any ) -> List[Any]: '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , ) def A ( self : List[str] , _a : Optional[Any] , _a : List[Any] , _a : List[Any] , _a : Optional[Any] ) -> Optional[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MobileViTVaModel(config=_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A ( self : Optional[Any] , _a : List[Any] , _a : int , _a : Tuple , _a : Tuple ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MobileViTVaForImageClassification(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : int , _a : Dict , _a : Tuple , _a : Tuple , _a : Dict ) -> Tuple: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.num_labels _SCREAMING_SNAKE_CASE =MobileViTVaForSemanticSegmentation(_a ) model.to(_a ) model.eval() _SCREAMING_SNAKE_CASE =model(_a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) _SCREAMING_SNAKE_CASE =model(_a , labels=_a ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def A ( self : Dict ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs _SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values} return config, inputs_dict @require_torch class A__ ( A__ , A__ , unittest.TestCase ): A__ = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) A__ = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) A__ = False A__ = False A__ = False A__ = False def A ( self : Dict ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MobileViTVaModelTester(self ) _SCREAMING_SNAKE_CASE =MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a ) def A ( self : Optional[int] ) -> List[str]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def A ( self : Optional[int] ) -> Any: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def A ( self : Dict ) -> Any: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def A ( self : List[Any] ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def A ( self : str ) -> str: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def A ( self : List[Any] ) -> List[str]: '''simple docstring''' pass def A ( self : int ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE =model_class(_a ) _SCREAMING_SNAKE_CASE =inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE =[*signature.parameters.keys()] _SCREAMING_SNAKE_CASE =['pixel_values'] self.assertListEqual(arg_names[:1] , _a ) def A ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def A ( self : str ) -> Optional[Any]: '''simple docstring''' def check_hidden_states_output(_a : str , _a : Optional[int] , _a : Dict ): _SCREAMING_SNAKE_CASE =model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) ) _SCREAMING_SNAKE_CASE =outputs.hidden_states _SCREAMING_SNAKE_CASE =5 self.assertEqual(len(_a ) , _a ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. _SCREAMING_SNAKE_CASE =2 for i in range(len(_a ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , ) divisor *= 2 self.assertEqual(self.model_tester.output_stride , divisor // 2 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE =True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE =True check_hidden_states_output(_a , _a , _a ) def A ( self : int ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) def A ( self : Dict ) -> Union[str, Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*_a ) @slow def A ( self : int ) -> Optional[Any]: '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE =MobileViTVaModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def _lowerCAmelCase ( ) -> Dict: """simple docstring""" _SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class A__ ( unittest.TestCase ): @cached_property def A ( self : int ) -> Optional[Any]: '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def A ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( _a ) _SCREAMING_SNAKE_CASE =self.default_image_processor _SCREAMING_SNAKE_CASE =prepare_img() _SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='pt' ).to(_a ) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(**_a ) # verify the logits _SCREAMING_SNAKE_CASE =torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor([-1.6_336e00, -7.3_204e-02, -5.1_883e-01] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) ) @slow def A ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _SCREAMING_SNAKE_CASE =model.to(_a ) _SCREAMING_SNAKE_CASE =MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _SCREAMING_SNAKE_CASE =prepare_img() _SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='pt' ).to(_a ) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(**_a ) _SCREAMING_SNAKE_CASE =outputs.logits # verify the logits _SCREAMING_SNAKE_CASE =torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape , _a ) _SCREAMING_SNAKE_CASE =torch.tensor( [ [[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]], [[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]], [[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]], ] , device=_a , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) ) @slow def A ( self : Optional[int] ) -> List[Any]: '''simple docstring''' _SCREAMING_SNAKE_CASE =MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _SCREAMING_SNAKE_CASE =model.to(_a ) _SCREAMING_SNAKE_CASE =MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) _SCREAMING_SNAKE_CASE =prepare_img() _SCREAMING_SNAKE_CASE =image_processor(images=_a , return_tensors='pt' ).to(_a ) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE =model(**_a ) _SCREAMING_SNAKE_CASE =outputs.logits.detach().cpu() _SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] ) _SCREAMING_SNAKE_CASE =torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape , _a ) _SCREAMING_SNAKE_CASE =image_processor.post_process_semantic_segmentation(outputs=_a ) _SCREAMING_SNAKE_CASE =torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape , _a )
47
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _snake_case = logging.get_logger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
294
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool SCREAMING_SNAKE_CASE__ : Tuple = { 'Acehnese Arabic': 'ace_Arab', 'Acehnese Latin': 'ace_Latn', 'Mesopotamian Arabic': 'acm_Arab', 'Ta\'izzi-Adeni Arabic': 'acq_Arab', 'Tunisian Arabic': 'aeb_Arab', 'Afrikaans': 'afr_Latn', 'South Levantine Arabic': 'ajp_Arab', 'Akan': 'aka_Latn', 'Amharic': 'amh_Ethi', 'North Levantine Arabic': 'apc_Arab', 'Modern Standard Arabic': 'arb_Arab', 'Modern Standard Arabic Romanized': 'arb_Latn', 'Najdi Arabic': 'ars_Arab', 'Moroccan Arabic': 'ary_Arab', 'Egyptian Arabic': 'arz_Arab', 'Assamese': 'asm_Beng', 'Asturian': 'ast_Latn', 'Awadhi': 'awa_Deva', 'Central Aymara': 'ayr_Latn', 'South Azerbaijani': 'azb_Arab', 'North Azerbaijani': 'azj_Latn', 'Bashkir': 'bak_Cyrl', 'Bambara': 'bam_Latn', 'Balinese': 'ban_Latn', 'Belarusian': 'bel_Cyrl', 'Bemba': 'bem_Latn', 'Bengali': 'ben_Beng', 'Bhojpuri': 'bho_Deva', 'Banjar Arabic': 'bjn_Arab', 'Banjar Latin': 'bjn_Latn', 'Standard Tibetan': 'bod_Tibt', 'Bosnian': 'bos_Latn', 'Buginese': 'bug_Latn', 'Bulgarian': 'bul_Cyrl', 'Catalan': 'cat_Latn', 'Cebuano': 'ceb_Latn', 'Czech': 'ces_Latn', 'Chokwe': 'cjk_Latn', 'Central Kurdish': 'ckb_Arab', 'Crimean Tatar': 'crh_Latn', 'Welsh': 'cym_Latn', 'Danish': 'dan_Latn', 'German': 'deu_Latn', 'Southwestern Dinka': 'dik_Latn', 'Dyula': 'dyu_Latn', 'Dzongkha': 'dzo_Tibt', 'Greek': 'ell_Grek', 'English': 'eng_Latn', 'Esperanto': 'epo_Latn', 'Estonian': 'est_Latn', 'Basque': 'eus_Latn', 'Ewe': 'ewe_Latn', 'Faroese': 'fao_Latn', 'Fijian': 'fij_Latn', 'Finnish': 'fin_Latn', 'Fon': 'fon_Latn', 'French': 'fra_Latn', 'Friulian': 'fur_Latn', 'Nigerian Fulfulde': 'fuv_Latn', 'Scottish Gaelic': 'gla_Latn', 'Irish': 'gle_Latn', 'Galician': 'glg_Latn', 'Guarani': 'grn_Latn', 'Gujarati': 'guj_Gujr', 'Haitian Creole': 'hat_Latn', 'Hausa': 'hau_Latn', 'Hebrew': 'heb_Hebr', 'Hindi': 'hin_Deva', 'Chhattisgarhi': 'hne_Deva', 'Croatian': 'hrv_Latn', 'Hungarian': 'hun_Latn', 'Armenian': 'hye_Armn', 'Igbo': 'ibo_Latn', 'Ilocano': 'ilo_Latn', 'Indonesian': 'ind_Latn', 'Icelandic': 'isl_Latn', 'Italian': 'ita_Latn', 'Javanese': 'jav_Latn', 'Japanese': 'jpn_Jpan', 'Kabyle': 'kab_Latn', 'Jingpho': 'kac_Latn', 'Kamba': 'kam_Latn', 'Kannada': 'kan_Knda', 'Kashmiri Arabic': 'kas_Arab', 'Kashmiri Devanagari': 'kas_Deva', 'Georgian': 'kat_Geor', 'Central Kanuri Arabic': 'knc_Arab', 'Central Kanuri Latin': 'knc_Latn', 'Kazakh': 'kaz_Cyrl', 'Kabiyè': 'kbp_Latn', 'Kabuverdianu': 'kea_Latn', 'Khmer': 'khm_Khmr', 'Kikuyu': 'kik_Latn', 'Kinyarwanda': 'kin_Latn', 'Kyrgyz': 'kir_Cyrl', 'Kimbundu': 'kmb_Latn', 'Northern Kurdish': 'kmr_Latn', 'Kikongo': 'kon_Latn', 'Korean': 'kor_Hang', 'Lao': 'lao_Laoo', 'Ligurian': 'lij_Latn', 'Limburgish': 'lim_Latn', 'Lingala': 'lin_Latn', 'Lithuanian': 'lit_Latn', 'Lombard': 'lmo_Latn', 'Latgalian': 'ltg_Latn', 'Luxembourgish': 'ltz_Latn', 'Luba-Kasai': 'lua_Latn', 'Ganda': 'lug_Latn', 'Luo': 'luo_Latn', 'Mizo': 'lus_Latn', 'Standard Latvian': 'lvs_Latn', 'Magahi': 'mag_Deva', 'Maithili': 'mai_Deva', 'Malayalam': 'mal_Mlym', 'Marathi': 'mar_Deva', 'Minangkabau Arabic ': 'min_Arab', 'Minangkabau Latin': 'min_Latn', 'Macedonian': 'mkd_Cyrl', 'Plateau Malagasy': 'plt_Latn', 'Maltese': 'mlt_Latn', 'Meitei Bengali': 'mni_Beng', 'Halh Mongolian': 'khk_Cyrl', 'Mossi': 'mos_Latn', 'Maori': 'mri_Latn', 'Burmese': 'mya_Mymr', 'Dutch': 'nld_Latn', 'Norwegian Nynorsk': 'nno_Latn', 'Norwegian Bokmål': 'nob_Latn', 'Nepali': 'npi_Deva', 'Northern Sotho': 'nso_Latn', 'Nuer': 'nus_Latn', 'Nyanja': 'nya_Latn', 'Occitan': 'oci_Latn', 'West Central Oromo': 'gaz_Latn', 'Odia': 'ory_Orya', 'Pangasinan': 'pag_Latn', 'Eastern Panjabi': 'pan_Guru', 'Papiamento': 'pap_Latn', 'Western Persian': 'pes_Arab', 'Polish': 'pol_Latn', 'Portuguese': 'por_Latn', 'Dari': 'prs_Arab', 'Southern Pashto': 'pbt_Arab', 'Ayacucho Quechua': 'quy_Latn', 'Romanian': 'ron_Latn', 'Rundi': 'run_Latn', 'Russian': 'rus_Cyrl', 'Sango': 'sag_Latn', 'Sanskrit': 'san_Deva', 'Santali': 'sat_Olck', 'Sicilian': 'scn_Latn', 'Shan': 'shn_Mymr', 'Sinhala': 'sin_Sinh', 'Slovak': 'slk_Latn', 'Slovenian': 'slv_Latn', 'Samoan': 'smo_Latn', 'Shona': 'sna_Latn', 'Sindhi': 'snd_Arab', 'Somali': 'som_Latn', 'Southern Sotho': 'sot_Latn', 'Spanish': 'spa_Latn', 'Tosk Albanian': 'als_Latn', 'Sardinian': 'srd_Latn', 'Serbian': 'srp_Cyrl', 'Swati': 'ssw_Latn', 'Sundanese': 'sun_Latn', 'Swedish': 'swe_Latn', 'Swahili': 'swh_Latn', 'Silesian': 'szl_Latn', 'Tamil': 'tam_Taml', 'Tatar': 'tat_Cyrl', 'Telugu': 'tel_Telu', 'Tajik': 'tgk_Cyrl', 'Tagalog': 'tgl_Latn', 'Thai': 'tha_Thai', 'Tigrinya': 'tir_Ethi', 'Tamasheq Latin': 'taq_Latn', 'Tamasheq Tifinagh': 'taq_Tfng', 'Tok Pisin': 'tpi_Latn', 'Tswana': 'tsn_Latn', 'Tsonga': 'tso_Latn', 'Turkmen': 'tuk_Latn', 'Tumbuka': 'tum_Latn', 'Turkish': 'tur_Latn', 'Twi': 'twi_Latn', 'Central Atlas Tamazight': 'tzm_Tfng', 'Uyghur': 'uig_Arab', 'Ukrainian': 'ukr_Cyrl', 'Umbundu': 'umb_Latn', 'Urdu': 'urd_Arab', 'Northern Uzbek': 'uzn_Latn', 'Venetian': 'vec_Latn', 'Vietnamese': 'vie_Latn', 'Waray': 'war_Latn', 'Wolof': 'wol_Latn', 'Xhosa': 'xho_Latn', 'Eastern Yiddish': 'ydd_Hebr', 'Yoruba': 'yor_Latn', 'Yue Chinese': 'yue_Hant', 'Chinese Simplified': 'zho_Hans', 'Chinese Traditional': 'zho_Hant', 'Standard Malay': 'zsm_Latn', 'Zulu': 'zul_Latn', } class UpperCamelCase__ (lowerCAmelCase__ ): '''simple docstring''' lowerCamelCase_ : Union[str, Any] = """facebook/nllb-200-distilled-600M""" lowerCamelCase_ : List[Any] = ( """This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """ """be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """ """which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """ """plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`.""" ) lowerCamelCase_ : Any = """translator""" lowerCamelCase_ : Any = AutoTokenizer lowerCamelCase_ : List[Any] = AutoModelForSeqaSeqLM lowerCamelCase_ : Optional[int] = LANGUAGE_CODES lowerCamelCase_ : Union[str, Any] = ["""text""", """text""", """text"""] lowerCamelCase_ : Dict = ["""text"""] def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''' ) if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''' ) lowerCamelCase : Union[str, Any] = self.lang_to_code[src_lang] lowerCamelCase : List[Any] = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( UpperCamelCase__ , return_tensors="pt" , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Any: return self.model.generate(**UpperCamelCase__ ) def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]: return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__ )
48
"""simple docstring""" import unittest import numpy as np def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ): '''simple docstring''' _a : List[Any] = np.shape(UpperCamelCase__ ) _a : Any = np.shape(UpperCamelCase__ ) _a : Union[str, Any] = np.shape(UpperCamelCase__ ) if shape_a[0] != shape_b[0]: _a : int = ( """Expected the same number of rows for A and B. """ F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(UpperCamelCase__ ) if shape_b[1] != shape_c[1]: _a : Tuple = ( """Expected the same number of columns for B and C. """ F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(UpperCamelCase__ ) _a : int = pseudo_inv if a_inv is None: try: _a : Optional[int] = np.linalg.inv(UpperCamelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> None: _a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Optional[int] = np.array([[2, 1], [6, 3]] ) _a : Optional[Any] = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) _a : Union[str, Any] = np.block([[a, b], [b.T, c]] ) _a : int = np.linalg.det(UpperCAmelCase__ ) _a : Union[str, Any] = np.linalg.det(UpperCAmelCase__ ) _a : List[Any] = np.linalg.det(UpperCAmelCase__ ) self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s ) def _lowercase ( self : int ) -> None: _a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> None: _a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Dict = np.array([[0, 3], [3, 0], [2, 3]] ) _a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
294
0
from collections.abc import Generator from math import sin def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) != 32: raise ValueError('''Input must be of length 32''' ) __a = b'''''' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''08x''' )[-8:] __a = b'''''' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''' ) return little_endian_hex def __snake_case ( _UpperCAmelCase ): __a = b'''''' for char in message: bit_string += format(_UpperCAmelCase , '''08b''' ).encode('''utf-8''' ) __a = format(len(_UpperCAmelCase ) , '''064b''' ).encode('''utf-8''' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(_UpperCAmelCase ) % 512 != 448: bit_string += b"0" bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] ) return bit_string def __snake_case ( _UpperCAmelCase ): if len(_UpperCAmelCase ) % 512 != 0: raise ValueError('''Input must have length that\'s a multiple of 512''' ) for pos in range(0 , len(_UpperCAmelCase ) , 512 ): __a = bit_string[pos : pos + 512] __a = [] for i in range(0 , 512 , 32 ): block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) ) yield block_words def __snake_case ( _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) __a = format(_UpperCAmelCase , '''032b''' ) __a = '''''' for c in i_str: new_str += "1" if c == "0" else "0" return int(_UpperCAmelCase , 2 ) def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): return (a + b) % 2**32 def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ): if i < 0: raise ValueError('''Input must be non-negative''' ) if shift < 0: raise ValueError('''Shift must be non-negative''' ) return ((i << shift) ^ (i >> (32 - shift))) % 2**32 def __snake_case ( _UpperCAmelCase ): __a = preprocess(_UpperCAmelCase ) __a = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )] # Starting states __a = 0X67_452_301 __a = 0Xef_cda_b89 __a = 0X98_bad_cfe __a = 0X10_325_476 __a = [ 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(_UpperCAmelCase ): __a = aa __a = ba __a = ca __a = da # Hash current chunk for i in range(64 ): if i <= 15: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __a = d ^ (b & (c ^ d)) __a = i elif i <= 31: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __a = c ^ (d & (b ^ c)) __a = (5 * i + 1) % 16 elif i <= 47: __a = b ^ c ^ d __a = (3 * i + 5) % 16 else: __a = c ^ (b | not_aa(_UpperCAmelCase )) __a = (7 * i) % 16 __a = (f + a + added_consts[i] + block_words[g]) % 2**32 __a = d __a = c __a = b __a = sum_aa(_UpperCAmelCase , left_rotate_aa(_UpperCAmelCase , shift_amounts[i] ) ) # Add hashed chunk to running total __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = sum_aa(_UpperCAmelCase , _UpperCAmelCase ) __a = reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) + reformat_hex(_UpperCAmelCase ) return digest if __name__ == "__main__": import doctest doctest.testmod()
49
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = torch.device('cpu') def lowerCAmelCase__ ( ): '''simple docstring''' _a : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" _a : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Any = dct.pop(UpperCamelCase__ ) _a : Dict = val def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : Tuple = [] for k in state_dict.keys(): _a : Any = k if ".pwconv" in k: _a : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _a : List[str] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: _a : Optional[int] = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _a : Tuple = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _a : int = k_new.split(""".""" ) if ls[2].isdigit(): _a : Union[str, Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _a : Tuple = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _a : Optional[int] = 1_0_0_0 _a : Optional[Any] = """huggingface/label-files""" _a : Optional[Any] = """imagenet-1k-id2label.json""" _a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) _a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _a : Dict = idalabel _a : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _a : Any = [3, 3, 6, 4] _a : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": _a : Any = [3, 3, 9, 6] _a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": _a : List[Any] = [4, 3, 1_0, 5] _a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": _a : List[Any] = [4, 4, 1_2, 6] _a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ ) else: _a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : int = checkpoint _a : Optional[Any] = create_rename_keys(UpperCamelCase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # load HuggingFace model _a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval() hf_model.load_state_dict(UpperCamelCase__ ) # prepare test inputs _a : Any = prepare_img() _a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" ) # compare outputs from both models _a : Dict = get_expected_output(UpperCamelCase__ ) _a : int = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') _snake_case = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
294
0
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class lowerCAmelCase ( unittest.TestCase ): def __init__( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Any=7 , UpperCAmelCase : Dict=3 , UpperCAmelCase : Optional[int]=10 , UpperCAmelCase : List[Any]=18 , UpperCAmelCase : Dict=30 , UpperCAmelCase : List[str]=400 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : str=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=[0.5, 0.5, 0.5] , UpperCAmelCase : Optional[int]=[0.5, 0.5, 0.5] , UpperCAmelCase : List[str]=None , ) -> Any: lowerCamelCase__ : Any = size if size is not None else {'shortest_edge': 18} lowerCamelCase__ : List[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowerCamelCase__ : Tuple = parent lowerCamelCase__ : List[Any] = batch_size lowerCamelCase__ : Dict = num_channels lowerCamelCase__ : Union[str, Any] = num_frames lowerCamelCase__ : Any = image_size lowerCamelCase__ : int = min_resolution lowerCamelCase__ : Tuple = max_resolution lowerCamelCase__ : List[str] = do_resize lowerCamelCase__ : List[str] = size lowerCamelCase__ : Union[str, Any] = do_normalize lowerCamelCase__ : List[str] = image_mean lowerCamelCase__ : Tuple = image_std lowerCamelCase__ : Tuple = crop_size def A_ ( self : Tuple ) -> List[Any]: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCAmelCase ( __UpperCamelCase, unittest.TestCase ): UpperCAmelCase__ = VivitImageProcessor if is_vision_available() else None def A_ ( self : Optional[Any] ) -> Union[str, Any]: lowerCamelCase__ : Optional[int] = VivitImageProcessingTester(self ) @property def A_ ( self : int ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def A_ ( self : Dict ) -> Any: lowerCamelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase , 'image_mean' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'image_std' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_normalize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_resize' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'do_center_crop' ) ) self.assertTrue(hasattr(UpperCAmelCase , 'size' ) ) def A_ ( self : Dict ) -> Optional[Any]: lowerCamelCase__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} ) lowerCamelCase__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} ) def A_ ( self : List[Any] ) -> Optional[int]: # Initialize image_processing lowerCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos lowerCamelCase__ : List[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase ) for video in video_inputs: self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not batched input lowerCamelCase__ : str = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCamelCase__ : Optional[int] = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def A_ ( self : int ) -> Union[str, Any]: # Initialize image_processing lowerCamelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase__ : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase ) for video in video_inputs: self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input lowerCamelCase__ : Any = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCamelCase__ : Tuple = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) def A_ ( self : Dict ) -> Optional[int]: # Initialize image_processing lowerCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase__ : Union[str, Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase ) for video in video_inputs: self.assertIsInstance(UpperCAmelCase , UpperCAmelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input lowerCamelCase__ : Union[str, Any] = image_processing(video_inputs[0] , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , ) # Test batched lowerCamelCase__ : Tuple = image_processing(UpperCAmelCase , return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) , )
50
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['PerceiverFeatureExtractor'] _snake_case = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
snake_case_ : Dict = { "Pillow": "Pillow", "accelerate": "accelerate>=0.11.0", "compel": "compel==0.1.8", "black": "black~=23.1", "datasets": "datasets", "filelock": "filelock", "flax": "flax>=0.4.1", "hf-doc-builder": "hf-doc-builder>=0.3.0", "huggingface-hub": "huggingface-hub>=0.13.2", "requests-mock": "requests-mock==1.10.0", "importlib_metadata": "importlib_metadata", "invisible-watermark": "invisible-watermark", "isort": "isort>=5.5.4", "jax": "jax>=0.2.8,!=0.3.2", "jaxlib": "jaxlib>=0.1.65", "Jinja2": "Jinja2", "k-diffusion": "k-diffusion>=0.0.12", "torchsde": "torchsde", "note_seq": "note_seq", "librosa": "librosa", "numpy": "numpy", "omegaconf": "omegaconf", "parameterized": "parameterized", "protobuf": "protobuf>=3.20.3,<4", "pytest": "pytest", "pytest-timeout": "pytest-timeout", "pytest-xdist": "pytest-xdist", "ruff": "ruff>=0.0.241", "safetensors": "safetensors", "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92", "scipy": "scipy", "onnx": "onnx", "regex": "regex!=2019.12.17", "requests": "requests", "tensorboard": "tensorboard", "torch": "torch>=1.4", "torchvision": "torchvision", "transformers": "transformers>=4.25.1", "urllib3": "urllib3<=2.0.0", }
51
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict: if k in (0.0_4, 0.0_6): _a : List[str] = k _a : List[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Dict ) -> str: return str(self.k ) def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]: _a : Dict = cva.imread(UpperCAmelCase__ , 0 ) _a , _a : List[Any] = img.shape _a : list[list[int]] = [] _a : List[Any] = img.copy() _a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB ) _a , _a : Any = np.gradient(UpperCAmelCase__ ) _a : Tuple = dx**2 _a : Union[str, Any] = dy**2 _a : Union[str, Any] = dx * dy _a : int = 0.0_4 _a : List[str] = self.window_size // 2 for y in range(UpperCAmelCase__ , h - offset ): for x in range(UpperCAmelCase__ , w - offset ): _a : str = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : List[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Tuple = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Any = (wxx * wyy) - (wxy**2) _a : Tuple = wxx + wyy _a : Any = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _snake_case = HarrisCorner(0.04, 3) _snake_case , _snake_case = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
294
0
class A__ : def __init__( self , A_ , A_=None , A_=None ): '''simple docstring''' UpperCamelCase : List[str] = data UpperCamelCase : Optional[int] = previous UpperCamelCase : Optional[Any] = next_node def __str__( self ): '''simple docstring''' return F"""{self.data}""" def __UpperCamelCase( self ): '''simple docstring''' return self.data def __UpperCamelCase( self ): '''simple docstring''' return self.next def __UpperCamelCase( self ): '''simple docstring''' return self.previous class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = head def __iter__( self ): '''simple docstring''' return self def __UpperCamelCase( self ): '''simple docstring''' if not self.current: raise StopIteration else: UpperCamelCase : Optional[Any] = self.current.get_data() UpperCamelCase : List[str] = self.current.get_next() return value class A__ : def __init__( self ): '''simple docstring''' UpperCamelCase : Any = None # First node in list UpperCamelCase : Optional[int] = None # Last node in list def __str__( self ): '''simple docstring''' UpperCamelCase : str = self.head UpperCamelCase : Tuple = [] while current is not None: nodes.append(current.get_data() ) UpperCamelCase : Dict = current.get_next() return " ".join(str(A_ ) for node in nodes ) def __contains__( self , A_ ): '''simple docstring''' UpperCamelCase : int = self.head while current: if current.get_data() == value: return True UpperCamelCase : Tuple = current.get_next() return False def __iter__( self ): '''simple docstring''' return LinkedListIterator(self.head ) def __UpperCamelCase( self ): '''simple docstring''' if self.head: return self.head.get_data() return None def __UpperCamelCase( self ): '''simple docstring''' if self.tail: return self.tail.get_data() return None def __UpperCamelCase( self , A_ ): '''simple docstring''' if self.head is None: UpperCamelCase : List[Any] = node UpperCamelCase : Union[str, Any] = node else: self.insert_before_node(self.head , A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' if self.head is None: self.set_head(A_ ) else: self.insert_after_node(self.tail , A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = Node(A_ ) if self.head is None: self.set_head(A_ ) else: self.set_tail(A_ ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = node UpperCamelCase : List[Any] = node.previous if node.get_previous() is None: UpperCamelCase : str = node_to_insert else: UpperCamelCase : Dict = node_to_insert UpperCamelCase : Optional[Any] = node_to_insert def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = node UpperCamelCase : Optional[Any] = node.next if node.get_next() is None: UpperCamelCase : List[str] = node_to_insert else: UpperCamelCase : int = node_to_insert UpperCamelCase : Union[str, Any] = node_to_insert def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = 1 UpperCamelCase : Tuple = Node(A_ ) UpperCamelCase : Optional[int] = self.head while node: if current_position == position: self.insert_before_node(A_ , A_ ) return current_position += 1 UpperCamelCase : Union[str, Any] = node.next self.insert_after_node(self.tail , A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = self.head while node: if node.get_data() == item: return node UpperCamelCase : int = node.get_next() raise Exception("Node not found" ) def __UpperCamelCase( self , A_ ): '''simple docstring''' if (node := self.get_node(A_ )) is not None: if node == self.head: UpperCamelCase : Optional[Any] = self.head.get_next() if node == self.tail: UpperCamelCase : Dict = self.tail.get_previous() self.remove_node_pointers(A_ ) @staticmethod def __UpperCamelCase( A_ ): '''simple docstring''' if node.get_next(): UpperCamelCase : List[str] = node.previous if node.get_previous(): UpperCamelCase : Dict = node.next UpperCamelCase : Any = None UpperCamelCase : Optional[Any] = None def __UpperCamelCase( self ): '''simple docstring''' return self.head is None def A_ ( ) -> None: pass if __name__ == "__main__": import doctest doctest.testmod()
52
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' # Check if the input is valid if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients _a , _a , _a : Any = equationa _a , _a , _a : Tuple = equationa # Calculate the determinants of the matrices _a : int = aa * ba - aa * ba _a : str = ca * ba - ca * ba _a : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _a : Dict = determinant_x / determinant _a : str = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
294
0
'''simple docstring''' import torch from transformers import AutoModel class snake_case ( torch.nn.Module ): """simple docstring""" def __init__( self : Optional[int] , __A : List[Any]="sayef/fsner-bert-base-uncased" ): super(__A , self ).__init__() __UpperCamelCase = AutoModel.from_pretrained(__A , return_dict=__A ) __UpperCamelCase = torch.nn.CosineSimilarity(3 , 1e-08 ) __UpperCamelCase = torch.nn.Softmax(dim=1 ) def _lowerCamelCase ( self : Tuple , **__A : Optional[int] ): return self.bert(**__A ).last_hidden_state def _lowerCamelCase ( self : Tuple , __A : Tuple ): return token_embeddings.sum(2 , keepdim=__A ) def _lowerCamelCase ( self : List[Any] , __A : str , __A : int , __A : str=1 ): return self.softmax(T * self.cos(__A , __A ) ) def _lowerCamelCase ( self : Optional[int] , __A : str , __A : Any ): __UpperCamelCase = W_supports['sizes'].tolist() __UpperCamelCase = W_supports['start_token_id'].item() __UpperCamelCase = W_supports['end_token_id'].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] __UpperCamelCase = self.BERT(**__A ) __UpperCamelCase = self.BERT(**__A ) __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = W_supports['input_ids'] == start_token_id __UpperCamelCase = W_supports['input_ids'] == end_token_id for i, size in enumerate(__A ): if i == 0: __UpperCamelCase = 0 else: __UpperCamelCase = support_sizes[i - 1] __UpperCamelCase = S[s : s + size][start_token_masks[s : s + size]] __UpperCamelCase = S[s : s + size][end_token_masks[s : s + size]] __UpperCamelCase = torch.matmul(q[i] , s_start.T ).sum(1 ).softmax(0 ) __UpperCamelCase = torch.matmul(q[i] , s_end.T ).sum(1 ).softmax(0 ) if p_starts is not None: __UpperCamelCase = torch.vstack((p_starts, p_start) ) __UpperCamelCase = torch.vstack((p_ends, p_end) ) else: __UpperCamelCase = p_start __UpperCamelCase = p_end return p_starts, p_ends
53
"""simple docstring""" _snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}] _snake_case = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
294
0
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import importlib.metadata import json import os from dataclasses import dataclass from typing import Any, Dict, Union from packaging import version from ..utils import is_torch_available, logging if is_torch_available(): import torch a__ : Union[str, Any] = logging.get_logger(__name__) @dataclass class UpperCamelCase_ : """simple docstring""" def __init__( self : Union[str, Any] , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : List[Any]=False , UpperCAmelCase__ : List[Any]=6.0 , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : Union[str, Any]="fp4" , UpperCAmelCase__ : Optional[int]=False , **UpperCAmelCase__ : List[str] , ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE = load_in_abit __SCREAMING_SNAKE_CASE = load_in_abit __SCREAMING_SNAKE_CASE = llm_inta_threshold __SCREAMING_SNAKE_CASE = llm_inta_skip_modules __SCREAMING_SNAKE_CASE = llm_inta_enable_fpaa_cpu_offload __SCREAMING_SNAKE_CASE = llm_inta_has_fpaa_weight __SCREAMING_SNAKE_CASE = bnb_abit_quant_type __SCREAMING_SNAKE_CASE = bnb_abit_use_double_quant if bnb_abit_compute_dtype is None: __SCREAMING_SNAKE_CASE = torch.floataa elif isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): __SCREAMING_SNAKE_CASE = getattr(UpperCAmelCase__ , UpperCAmelCase__ ) elif isinstance(UpperCAmelCase__ , torch.dtype ): __SCREAMING_SNAKE_CASE = bnb_abit_compute_dtype else: raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype" ) self.post_init() def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]: if not isinstance(self.llm_inta_threshold , UpperCAmelCase__ ): raise ValueError("llm_int8_threshold must be a float" ) if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , UpperCAmelCase__ ): raise ValueError("llm_int8_skip_modules must be a list of strings" ) if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , UpperCAmelCase__ ): raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean" ) if not isinstance(self.llm_inta_has_fpaa_weight , UpperCAmelCase__ ): raise ValueError("llm_int8_has_fp16_weight must be a boolean" ) if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ): raise ValueError("bnb_4bit_compute_dtype must be torch.dtype" ) if not isinstance(self.bnb_abit_quant_type , UpperCAmelCase__ ): raise ValueError("bnb_4bit_quant_type must be a string" ) if not isinstance(self.bnb_abit_use_double_quant , UpperCAmelCase__ ): raise ValueError("bnb_4bit_use_double_quant must be a boolean" ) if self.load_in_abit and not version.parse(importlib.metadata.version("bitsandbytes" ) ) >= version.parse( "0.39.0" ): raise ValueError( "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version" ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> Union[str, Any]: return self.load_in_abit or self.load_in_abit def UpperCAmelCase_ ( self : List[str] ) -> int: if self.load_in_abit: return "llm_int8" elif self.load_in_abit and self.bnb_abit_quant_type == "fp4": return "fp4" elif self.load_in_abit and self.bnb_abit_quant_type == "nf4": return "nf4" else: return None @classmethod def UpperCAmelCase_ ( cls : Dict , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : List[Any] ) -> List[str]: __SCREAMING_SNAKE_CASE = cls(**UpperCAmelCase__ ) __SCREAMING_SNAKE_CASE = [] for key, value in kwargs.items(): if hasattr(UpperCAmelCase__ , UpperCAmelCase__ ): setattr(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) to_remove.append(UpperCAmelCase__ ) for key in to_remove: kwargs.pop(UpperCAmelCase__ , UpperCAmelCase__ ) if return_unused_kwargs: return config, kwargs else: return config def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : Union[str, os.PathLike] ) -> List[Any]: with open(UpperCAmelCase__ , "w" , encoding="utf-8" ) as writer: __SCREAMING_SNAKE_CASE = self.to_dict() __SCREAMING_SNAKE_CASE = json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + "\n" writer.write(UpperCAmelCase__ ) def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict[str, Any]: __SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ ) __SCREAMING_SNAKE_CASE = str(output["bnb_4bit_compute_dtype"] ).split("." )[1] return output def __repr__( self : Optional[int] ) -> List[Any]: return F"""{self.__class__.__name__} {self.to_json_string()}""" def UpperCAmelCase_ ( self : List[str] , UpperCAmelCase__ : bool = True ) -> str: if use_diff is True: __SCREAMING_SNAKE_CASE = self.to_diff_dict() else: __SCREAMING_SNAKE_CASE = self.to_dict() return json.dumps(UpperCAmelCase__ , indent=2 , sort_keys=UpperCAmelCase__ ) + "\n" def UpperCAmelCase_ ( self : Optional[Any] ) -> Dict[str, Any]: __SCREAMING_SNAKE_CASE = self.to_dict() # get the default config dict __SCREAMING_SNAKE_CASE = BitsAndBytesConfig().to_dict() __SCREAMING_SNAKE_CASE = {} # only serialize values that differ from the default config for key, value in config_dict.items(): if value != default_config_dict[key]: __SCREAMING_SNAKE_CASE = value return serializable_config_dict
54
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = '''mvp''' UpperCamelCase : Union[str, Any] = ['''past_key_values'''] UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]: _a : Any = vocab_size _a : Any = max_position_embeddings _a : Union[str, Any] = d_model _a : List[str] = encoder_ffn_dim _a : List[Any] = encoder_layers _a : Dict = encoder_attention_heads _a : Tuple = decoder_ffn_dim _a : List[Any] = decoder_layers _a : Optional[Any] = decoder_attention_heads _a : Optional[Any] = dropout _a : str = attention_dropout _a : Dict = activation_dropout _a : Any = activation_function _a : Tuple = init_std _a : Dict = encoder_layerdrop _a : Optional[int] = decoder_layerdrop _a : Optional[Any] = classifier_dropout _a : List[Any] = use_cache _a : Dict = encoder_layers _a : str = scale_embedding # scale factor will be sqrt(d_model) if True _a : int = use_prompt _a : Dict = prompt_length _a : Dict = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ): _a : List[str] = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ """The config can simply be saved and uploaded again to be fixed.""" )
294
0
'''simple docstring''' import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) a_ : Optional[Any] = logging.getLogger(__name__) @dataclass class snake_case : """simple docstring""" _lowerCamelCase = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) _lowerCamelCase = field( default=lowercase , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) _lowerCamelCase = field( default=lowercase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) _lowerCamelCase = field( default=lowercase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) _lowerCamelCase = field(default=lowercase , metadata={"help": "Whether tp freeze the encoder."} ) _lowerCamelCase = field(default=lowercase , metadata={"help": "Whether to freeze the embeddings."} ) @dataclass class snake_case : """simple docstring""" _lowerCamelCase = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) _lowerCamelCase = field( default="summarization" , metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"} , ) _lowerCamelCase = field( default=10_24 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _lowerCamelCase = field( default=1_28 , metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _lowerCamelCase = field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) } , ) _lowerCamelCase = field( default=1_42 , metadata={ "help": ( "The maximum total sequence length for test target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) _lowerCamelCase = field(default=-1 , metadata={"help": "# training examples. -1 means use all."} ) _lowerCamelCase = field(default=-1 , metadata={"help": "# validation examples. -1 means use all."} ) _lowerCamelCase = field(default=-1 , metadata={"help": "# test examples. -1 means use all."} ) _lowerCamelCase = field(default=lowercase , metadata={"help": "Source language id for translation."} ) _lowerCamelCase = field(default=lowercase , metadata={"help": "Target language id for translation."} ) _lowerCamelCase = field(default=lowercase , metadata={"help": "# num_beams to use for evaluation."} ) _lowerCamelCase = field( default=lowercase , metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."} , ) def __snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] ): logger.info(F'''***** {split} metrics *****''' ) for key in sorted(metrics.keys() ): logger.info(F''' {key} = {metrics[key]}''' ) save_json(UpperCAmelCase_ , os.path.join(UpperCAmelCase_ , F'''{split}_results.json''' ) ) def __snake_case ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowerCamelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = parser.parse_args_into_dataclasses() check_output_dir(UpperCAmelCase_ ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s" , UpperCAmelCase_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowerCamelCase_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCamelCase_ = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ): assert hasattr(UpperCAmelCase_ , UpperCAmelCase_ ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute''' setattr(UpperCAmelCase_ , UpperCAmelCase_ , getattr(UpperCAmelCase_ , UpperCAmelCase_ ) ) lowerCamelCase_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) lowerCamelCase_ = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=".ckpt" in model_args.model_name_or_path , config=UpperCAmelCase_ , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(UpperCAmelCase_ , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: lowerCamelCase_ = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(UpperCAmelCase_ , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): lowerCamelCase_ = tokenizer.lang_code_to_id[data_args.tgt_lang] else: lowerCamelCase_ = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(UpperCAmelCase_ ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) lowerCamelCase_ = SeqaSeqDataset # Get datasets lowerCamelCase_ = ( dataset_class( UpperCAmelCase_ , type_path="train" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_train else None ) lowerCamelCase_ = ( dataset_class( UpperCAmelCase_ , type_path="val" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) lowerCamelCase_ = ( dataset_class( UpperCAmelCase_ , type_path="test" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or "" , ) if training_args.do_predict else None ) # Initialize our Trainer lowerCamelCase_ = ( build_compute_metrics_fn(data_args.task , UpperCAmelCase_ ) if training_args.predict_with_generate else None ) lowerCamelCase_ = SeqaSeqTrainer( model=UpperCAmelCase_ , args=UpperCAmelCase_ , data_args=UpperCAmelCase_ , train_dataset=UpperCAmelCase_ , eval_dataset=UpperCAmelCase_ , data_collator=SeqaSeqDataCollator( UpperCAmelCase_ , UpperCAmelCase_ , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , ) lowerCamelCase_ = {} # Training if training_args.do_train: logger.info("*** Train ***" ) lowerCamelCase_ = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) lowerCamelCase_ = train_result.metrics lowerCamelCase_ = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train" , UpperCAmelCase_ , training_args.output_dir ) all_metrics.update(UpperCAmelCase_ ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) lowerCamelCase_ = trainer.evaluate(metric_key_prefix="val" ) lowerCamelCase_ = data_args.n_val lowerCamelCase_ = round(metrics["val_loss"] , 4 ) if trainer.is_world_process_zero(): handle_metrics("val" , UpperCAmelCase_ , training_args.output_dir ) all_metrics.update(UpperCAmelCase_ ) if training_args.do_predict: logger.info("*** Predict ***" ) lowerCamelCase_ = trainer.predict(test_dataset=UpperCAmelCase_ , metric_key_prefix="test" ) lowerCamelCase_ = test_output.metrics lowerCamelCase_ = data_args.n_test if trainer.is_world_process_zero(): lowerCamelCase_ = round(metrics["test_loss"] , 4 ) handle_metrics("test" , UpperCAmelCase_ , training_args.output_dir ) all_metrics.update(UpperCAmelCase_ ) if training_args.predict_with_generate: lowerCamelCase_ = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) lowerCamelCase_ = lmap(str.strip , UpperCAmelCase_ ) write_txt_file(UpperCAmelCase_ , os.path.join(training_args.output_dir , "test_generations.txt" ) ) if trainer.is_world_process_zero(): save_json(UpperCAmelCase_ , os.path.join(training_args.output_dir , "all_results.json" ) ) return all_metrics def __snake_case ( UpperCAmelCase_ : Dict ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
55
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _snake_case = logging.getLogger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple: # in NER datasets, the last column is usually reserved for NER label _a : Optional[int] = label_idx def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : Any = mode.value _a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : int = 1 _a : int = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: _a : str = [] _a : str = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 _a : List[str] = [] _a : str = [] else: _a : List[Any] = line.split(""" """ ) words.append(splits[0] ) if len(UpperCAmelCase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) return examples def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]: _a : List[str] = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(UpperCAmelCase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(UpperCAmelCase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : List[Any] = f.read().splitlines() if "O" not in labels: _a : Union[str, Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCamelCase ( snake_case_ ): def __init__( self : Union[str, Any] ) -> List[str]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : Optional[int] = f.read().splitlines() if "O" not in labels: _a : Optional[Any] = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : List[Any] = mode.value _a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : List[str] = 1 _a : Optional[Any] = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(UpperCAmelCase__ ): _a : List[Any] = [] _a : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 return examples def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict: _a : Optional[Any] = 0 for sentence in parse_incr(UpperCAmelCase__ ): _a : List[str] = preds_list[example_id] _a : str = """""" for token in sentence: out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(UpperCAmelCase__ ) example_id += 1 def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
294
0
'''simple docstring''' import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging a : List[Any] = logging.get_logger(__name__) def __magic_name__ ( ) -> str: '''simple docstring''' snake_case_ = os.getenv('''SM_HP_MP_PARAMETERS''', '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. snake_case_ = json.loads(__UpperCAmelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. snake_case_ = os.getenv('''SM_FRAMEWORK_PARAMS''', '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". snake_case_ = json.loads(__UpperCAmelCase ) if not mpi_options.get('''sagemaker_mpi_enabled''', __UpperCAmelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class a ( _lowerCamelCase ): snake_case_ = field( default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , ) def A_ ( self : Optional[Any] ): super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' , lowercase_ , ) @cached_property def A_ ( self : List[Any] ): logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: snake_case_ = torch.device('''cpu''' ) snake_case_ = 0 elif is_sagemaker_model_parallel_available(): snake_case_ = smp.local_rank() snake_case_ = torch.device('''cuda''' , lowercase_ ) snake_case_ = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta ) snake_case_ = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) snake_case_ = torch.device('''cuda''' , self.local_rank ) snake_case_ = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 snake_case_ = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. snake_case_ = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta ) snake_case_ = torch.device('''cuda''' , self.local_rank ) snake_case_ = 1 if device.type == "cuda": torch.cuda.set_device(lowercase_ ) return device @property def A_ ( self : Optional[int] ): if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def A_ ( self : Dict ): return not is_sagemaker_model_parallel_available() @property def A_ ( self : Optional[Any] ): return False
56
"""simple docstring""" from __future__ import annotations import time import numpy as np _snake_case = [8, 5, 9, 7] _snake_case = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _snake_case = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class UpperCamelCase : def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None: _a : List[str] = claim_vector _a : List[Any] = allocated_resources_table _a : Union[str, Any] = maximum_claim_table def _lowercase ( self : Tuple ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _lowercase ( self : int ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _lowercase ( self : List[str] ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]: return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()} def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None: _a : List[Any] = self.__need() _a : Optional[int] = self.__allocated_resources_table _a : str = self.__available_resources() _a : Optional[Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: _a : int = False for each_need in need_list: _a : Optional[int] = True for index, need in enumerate(UpperCAmelCase__ ): if need > available_resources[index]: _a : List[Any] = False break if execution: _a : str = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: _a : Any = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(UpperCAmelCase__ ) # update available/freed resources stack _a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def _lowercase ( self : Any ) -> Optional[int]: print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
294
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray] __UpperCAmelCase : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class _UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' __UpperCAmelCase : np.ndarray __UpperCAmelCase : List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
57
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar _snake_case = TypeVar('_T') class UpperCamelCase ( Generic[_T] ): def __init__( self : Optional[int] , UpperCAmelCase__ : Iterable[_T] | None = None ) -> None: _a : list[_T] = list(iterable or [] ) _a : list[_T] = [] def __len__( self : str ) -> int: return len(self._stacka ) + len(self._stacka ) def __repr__( self : List[str] ) -> str: return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : _T ) -> None: self._stacka.append(UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ) -> _T: _a : Any = self._stacka.pop _a : Union[str, Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
294
0
'''simple docstring''' import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process lowercase_ = logging.getLogger(__name__) lowercase_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class a_ : '''simple docstring''' UpperCamelCase = field( default=snake_case_ , metadata={ '''help''': ( '''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.''' ) } , ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , ) UpperCamelCase = field( default=snake_case_ , metadata={ '''help''': ( '''Override some existing default config settings when a model is trained from scratch. Example: ''' '''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index''' ) } , ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , ) UpperCamelCase = field( default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , ) UpperCamelCase = field( default=snake_case_ , metadata={ '''help''': ( '''Will use the token generated when running `huggingface-cli login` (necessary to use this script ''' '''with private models).''' ) } , ) def snake_case_( self ) -> str: if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( """--config_overrides can't be used in combination with --config_name or --model_name_or_path""" ) @dataclass class a_ : '''simple docstring''' UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) UpperCamelCase = field(default=snake_case_ , metadata={'''help''': '''The input training data file (a text file).'''} ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) UpperCamelCase = field( default=5 , metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } , ) UpperCamelCase = field( default=snake_case_ , metadata={ '''help''': ( '''The maximum total input sequence length after tokenization. Sequences longer ''' '''than this will be truncated. Default to the max input length of the model.''' ) } , ) UpperCamelCase = field( default=snake_case_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , ) UpperCamelCase = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) UpperCamelCase = field( default=snake_case_ , metadata={ '''help''': ( '''Whether to pad all samples to `max_seq_length`. ''' '''If False, will pad the samples dynamically when batching to the maximum length in the batch.''' ) } , ) def snake_case_( self ) -> Optional[int]: if self.train_file is not None: _SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: _SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ) ->Tuple: with open(__lowerCamelCase , """r""" , encoding="""utf-8""" ) as f: _SCREAMING_SNAKE_CASE = [json.loads(__lowerCamelCase ) for line in f.read().splitlines() if (len(__lowerCamelCase ) > 0 and not line.isspace())] assert len(__lowerCamelCase ) == len(__lowerCamelCase ) _SCREAMING_SNAKE_CASE = {c: dataset[c] for c in dataset.column_names} _SCREAMING_SNAKE_CASE = refs return Dataset.from_dict(__lowerCamelCase ) def lowerCamelCase ( ) ->Union[str, Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses() # Detecting last checkpoint. _SCREAMING_SNAKE_CASE = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'Output directory ({training_args.output_dir}) already exists and is not empty. ' """Use --overwrite_output_dir to overcome.""" ) elif last_checkpoint is not None: logger.info( F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN ) # Log on each process the small summary: logger.warning( F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("""Training/evaluation parameters %s""" , __lowerCamelCase ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. _SCREAMING_SNAKE_CASE = load_dataset(data_args.dataset_name , data_args.dataset_config_name ) if "validation" not in datasets.keys(): _SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , ) _SCREAMING_SNAKE_CASE = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , ) else: _SCREAMING_SNAKE_CASE = {} if data_args.train_file is not None: _SCREAMING_SNAKE_CASE = data_args.train_file if data_args.validation_file is not None: _SCREAMING_SNAKE_CASE = data_args.validation_file _SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1] if extension == "txt": _SCREAMING_SNAKE_CASE = """text""" _SCREAMING_SNAKE_CASE = load_dataset(__lowerCamelCase , data_files=__lowerCamelCase ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _SCREAMING_SNAKE_CASE = { """cache_dir""": model_args.cache_dir, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.config_name: _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , **__lowerCamelCase ) elif model_args.model_name_or_path: _SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase ) else: _SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]() logger.warning("""You are instantiating a new config instance from scratch.""" ) if model_args.config_overrides is not None: logger.info(F'Overriding config: {model_args.config_overrides}' ) config.update_from_string(model_args.config_overrides ) logger.info(F'New config: {config}' ) _SCREAMING_SNAKE_CASE = { """cache_dir""": model_args.cache_dir, """use_fast""": model_args.use_fast_tokenizer, """revision""": model_args.model_revision, """use_auth_token""": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: _SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowerCamelCase ) elif model_args.model_name_or_path: _SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase ) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) if model_args.model_name_or_path: _SCREAMING_SNAKE_CASE = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("""Training new model from scratch""" ) _SCREAMING_SNAKE_CASE = AutoModelForMaskedLM.from_config(__lowerCamelCase ) model.resize_token_embeddings(len(__lowerCamelCase ) ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: _SCREAMING_SNAKE_CASE = datasets["""train"""].column_names else: _SCREAMING_SNAKE_CASE = datasets["""validation"""].column_names _SCREAMING_SNAKE_CASE = """text""" if """text""" in column_names else column_names[0] _SCREAMING_SNAKE_CASE = """max_length""" if data_args.pad_to_max_length else False def tokenize_function(__lowerCamelCase : List[str] ): # Remove empty lines _SCREAMING_SNAKE_CASE = [line for line in examples["""text"""] if len(__lowerCamelCase ) > 0 and not line.isspace()] return tokenizer(examples["""text"""] , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=data_args.max_seq_length ) _SCREAMING_SNAKE_CASE = datasets.map( __lowerCamelCase , batched=__lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , ) # Add the chinese references if provided if data_args.train_ref_file is not None: _SCREAMING_SNAKE_CASE = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file ) if data_args.validation_ref_file is not None: _SCREAMING_SNAKE_CASE = add_chinese_references( tokenized_datasets["""validation"""] , data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer _SCREAMING_SNAKE_CASE = data_args.train_ref_file or data_args.validation_ref_file if has_ref: _SCREAMING_SNAKE_CASE = False # Data collator # This one will take care of randomly masking the tokens. _SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(tokenizer=__lowerCamelCase , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer _SCREAMING_SNAKE_CASE = Trainer( model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , ) # Training if training_args.do_train: if last_checkpoint is not None: _SCREAMING_SNAKE_CASE = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ): _SCREAMING_SNAKE_CASE = model_args.model_name_or_path else: _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__lowerCamelCase ) trainer.save_model() # Saves the tokenizer too for easy upload _SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """train_results.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCamelCase , """w""" ) as writer: logger.info("""***** Train results *****""" ) for key, value in sorted(train_result.metrics.items() ): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # Evaluation _SCREAMING_SNAKE_CASE = {} if training_args.do_eval: logger.info("""*** Evaluate ***""" ) _SCREAMING_SNAKE_CASE = trainer.evaluate() _SCREAMING_SNAKE_CASE = math.exp(eval_output["""eval_loss"""] ) _SCREAMING_SNAKE_CASE = perplexity _SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" ) if trainer.is_world_process_zero(): with open(__lowerCamelCase , """w""" ) as writer: logger.info("""***** Eval results *****""" ) for key, value in sorted(results.items() ): logger.info(F' {key} = {value}' ) writer.write(F'{key} = {value}\n' ) return results def lowerCamelCase ( __lowerCamelCase : List[str] ) ->Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
58
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _snake_case = False class UpperCamelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Optional[Any] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Tuple ) -> List[Any]: _a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Optional[Any] = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase__ ) _a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : Optional[Any] = generator.manual_seed(0 ) _a : str = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : int = """cyberpunk 2077""" _a : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Tuple = torch.manual_seed(0 ) _a : Any = pipe.dual_guided( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _a : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : int = """A painting of a squirrel eating a burger """ _a : Tuple = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.text_to_image( prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images _a : int = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images _a : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
294
0
import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( ConditionalDetrConfig, ConditionalDetrForObjectDetection, ConditionalDetrForSegmentation, ConditionalDetrImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) __lowerCamelCase = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.weight', F'encoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.encoder.layers.{i}.self_attn.out_proj.bias', F'encoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'encoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'encoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'encoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'encoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.encoder.layers.{i}.norm1.weight', F'encoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'encoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'encoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'encoder.layers.{i}.final_layer_norm.bias')) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'decoder.layers.{i}.self_attn.out_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'decoder.layers.{i}.self_attn.out_proj.bias') ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.weight', F'decoder.layers.{i}.encoder_attn.out_proj.weight', ) ) rename_keys.append( ( F'transformer.decoder.layers.{i}.cross_attn.out_proj.bias', F'decoder.layers.{i}.encoder_attn.out_proj.bias', ) ) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'decoder.layers.{i}.fc1.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'decoder.layers.{i}.fc1.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'decoder.layers.{i}.fc2.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'decoder.layers.{i}.fc2.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm1.weight', F'decoder.layers.{i}.self_attn_layer_norm.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'decoder.layers.{i}.self_attn_layer_norm.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.weight', F'decoder.layers.{i}.encoder_attn_layer_norm.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.norm2.bias', F'decoder.layers.{i}.encoder_attn_layer_norm.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'decoder.layers.{i}.final_layer_norm.weight')) rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'decoder.layers.{i}.final_layer_norm.bias')) # q, k, v projections in self/cross-attention in decoder for conditional DETR rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', F'decoder.layers.{i}.sa_qcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', F'decoder.layers.{i}.sa_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qpos_proj.weight', F'decoder.layers.{i}.sa_qpos_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kpos_proj.weight', F'decoder.layers.{i}.sa_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.weight', F'decoder.layers.{i}.sa_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', F'decoder.layers.{i}.ca_qcontent_proj.weight') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', F'decoder.layers.{i}.ca_kcontent_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kpos_proj.weight', F'decoder.layers.{i}.ca_kpos_proj.weight') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.weight', F'decoder.layers.{i}.ca_v_proj.weight')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', F'decoder.layers.{i}.ca_qpos_sine_proj.weight') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', F'decoder.layers.{i}.sa_qcontent_proj.bias') ) rename_keys.append( (F'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', F'decoder.layers.{i}.sa_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.sa_qpos_proj.bias', F'decoder.layers.{i}.sa_qpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_kpos_proj.bias', F'decoder.layers.{i}.sa_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.sa_v_proj.bias', F'decoder.layers.{i}.sa_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', F'decoder.layers.{i}.ca_qcontent_proj.bias') ) # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias")) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', F'decoder.layers.{i}.ca_kcontent_proj.bias') ) rename_keys.append((F'transformer.decoder.layers.{i}.ca_kpos_proj.bias', F'decoder.layers.{i}.ca_kpos_proj.bias')) rename_keys.append((F'transformer.decoder.layers.{i}.ca_v_proj.bias', F'decoder.layers.{i}.ca_v_proj.bias')) rename_keys.append( (F'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', F'decoder.layers.{i}.ca_qpos_sine_proj.bias') ) # convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads # for conditional DETR, also convert reference point head and query scale MLP rename_keys.extend( [ ("""input_proj.weight""", """input_projection.weight"""), ("""input_proj.bias""", """input_projection.bias"""), ("""query_embed.weight""", """query_position_embeddings.weight"""), ("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""), ("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""), ("""class_embed.weight""", """class_labels_classifier.weight"""), ("""class_embed.bias""", """class_labels_classifier.bias"""), ("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""), ("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""), ("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""), ("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""), ("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""), ("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""), ("""transformer.decoder.ref_point_head.layers.0.weight""", """decoder.ref_point_head.layers.0.weight"""), ("""transformer.decoder.ref_point_head.layers.0.bias""", """decoder.ref_point_head.layers.0.bias"""), ("""transformer.decoder.ref_point_head.layers.1.weight""", """decoder.ref_point_head.layers.1.weight"""), ("""transformer.decoder.ref_point_head.layers.1.bias""", """decoder.ref_point_head.layers.1.bias"""), ("""transformer.decoder.query_scale.layers.0.weight""", """decoder.query_scale.layers.0.weight"""), ("""transformer.decoder.query_scale.layers.0.bias""", """decoder.query_scale.layers.0.bias"""), ("""transformer.decoder.query_scale.layers.1.weight""", """decoder.query_scale.layers.1.weight"""), ("""transformer.decoder.query_scale.layers.1.bias""", """decoder.query_scale.layers.1.bias"""), ("""transformer.decoder.layers.0.ca_qpos_proj.weight""", """decoder.layers.0.ca_qpos_proj.weight"""), ("""transformer.decoder.layers.0.ca_qpos_proj.bias""", """decoder.layers.0.ca_qpos_proj.bias"""), ] ) def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] ): snake_case : Optional[Any] = state_dict.pop(__lowerCamelCase ) snake_case : List[Any] = val def UpperCamelCase ( __lowerCamelCase : List[Any] ): snake_case : str = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: snake_case : Union[str, Any] = key.replace("backbone.0.body" , "backbone.conv_encoder.model" ) snake_case : Tuple = value else: snake_case : Tuple = value return new_state_dict def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : str=False ): snake_case : str = "" if is_panoptic: snake_case : str = "conditional_detr." # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) snake_case : Any = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" ) snake_case : Optional[Any] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case : List[Any] = in_proj_weight[:256, :] snake_case : str = in_proj_bias[:256] snake_case : Tuple = in_proj_weight[256:512, :] snake_case : Dict = in_proj_bias[256:512] snake_case : List[str] = in_proj_weight[-256:, :] snake_case : List[Any] = in_proj_bias[-256:] def UpperCamelCase ( ): snake_case : List[str] = "http://images.cocodataset.org/val2017/000000039769.jpg" snake_case : Dict = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw ) return im @torch.no_grad() def UpperCamelCase ( __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] ): snake_case : List[Any] = ConditionalDetrConfig() # set backbone and dilation attributes if "resnet101" in model_name: snake_case : int = "resnet101" if "dc5" in model_name: snake_case : List[str] = True snake_case : List[Any] = "panoptic" in model_name if is_panoptic: snake_case : str = 250 else: snake_case : Dict = 91 snake_case : Optional[int] = "huggingface/label-files" snake_case : Optional[Any] = "coco-detection-id2label.json" snake_case : Tuple = json.load(open(hf_hub_download(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) , "r" ) ) snake_case : List[Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()} snake_case : Union[str, Any] = idalabel snake_case : int = {v: k for k, v in idalabel.items()} # load image processor snake_case : Optional[int] = "coco_panoptic" if is_panoptic else "coco_detection" snake_case : Dict = ConditionalDetrImageProcessor(format=__lowerCamelCase ) # prepare image snake_case : Any = prepare_img() snake_case : Optional[Any] = image_processor(images=__lowerCamelCase , return_tensors="pt" ) snake_case : List[Any] = encoding["pixel_values"] logger.info(f"""Converting model {model_name}...""" ) # load original model from torch hub snake_case : Any = torch.hub.load("DeppMeng/ConditionalDETR" , __lowerCamelCase , pretrained=__lowerCamelCase ).eval() snake_case : Union[str, Any] = conditional_detr.state_dict() # rename keys for src, dest in rename_keys: if is_panoptic: snake_case : Optional[int] = "conditional_detr." + src rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) snake_case : Tuple = rename_backbone_keys(__lowerCamelCase ) # query, key and value matrices need special treatment read_in_q_k_v(__lowerCamelCase , is_panoptic=__lowerCamelCase ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them snake_case : List[Any] = "conditional_detr.model." if is_panoptic else "model." for key in state_dict.copy().keys(): if is_panoptic: if ( key.startswith("conditional_detr" ) and not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ) ): snake_case : str = state_dict.pop(__lowerCamelCase ) snake_case : Optional[Any] = val elif "class_labels_classifier" in key or "bbox_predictor" in key: snake_case : Optional[Any] = state_dict.pop(__lowerCamelCase ) snake_case : Optional[Any] = val elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ): continue else: snake_case : List[str] = state_dict.pop(__lowerCamelCase ) snake_case : Tuple = val else: if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ): snake_case : Union[str, Any] = state_dict.pop(__lowerCamelCase ) snake_case : Any = val # finally, create HuggingFace model and load state dict snake_case : Any = ConditionalDetrForSegmentation(__lowerCamelCase ) if is_panoptic else ConditionalDetrForObjectDetection(__lowerCamelCase ) model.load_state_dict(__lowerCamelCase ) model.eval() model.push_to_hub(repo_id=__lowerCamelCase , organization="DepuMeng" , commit_message="Add model" ) # verify our conversion snake_case : List[Any] = conditional_detr(__lowerCamelCase ) snake_case : Optional[int] = model(__lowerCamelCase ) assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-4 ) assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-4 ) if is_panoptic: assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 ) # Save model and image processor logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" ) Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase ) model.save_pretrained(__lowerCamelCase ) image_processor.save_pretrained(__lowerCamelCase ) if __name__ == "__main__": __lowerCamelCase = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""conditional_detr_resnet50""", type=str, help="""Name of the CONDITIONAL_DETR model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) __lowerCamelCase = parser.parse_args() convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
59
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'vocab.json'} _snake_case = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _snake_case = {'mgp-str': 27} class UpperCamelCase ( snake_case_ ): UpperCamelCase : List[str] = VOCAB_FILES_NAMES UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]="[GO]" , UpperCAmelCase__ : Tuple="[GO]" , UpperCAmelCase__ : Optional[int]="[s]" , UpperCAmelCase__ : int="[GO]" , **UpperCAmelCase__ : Dict ) -> int: super().__init__( unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) with open(UpperCAmelCase__ , encoding="""utf-8""" ) as vocab_handle: _a : int = json.load(UpperCAmelCase__ ) _a : Optional[int] = {v: k for k, v in self.vocab.items()} @property def _lowercase ( self : Dict ) -> Union[str, Any]: return len(self.vocab ) def _lowercase ( self : Union[str, Any] ) -> str: return dict(self.vocab , **self.added_tokens_encoder ) def _lowercase ( self : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]: _a : Tuple = [] for s in text: char_tokens.extend(UpperCAmelCase__ ) return char_tokens def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> Dict: return self.vocab.get(UpperCAmelCase__ , self.vocab.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: return self.decoder.get(UpperCAmelCase__ ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCAmelCase__ ) ) return _a : Tuple = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) + """\n""" ) return (vocab_file,)
294
0
"""simple docstring""" snake_case__ : Optional[Any] = {str(digit): digit**5 for digit in range(10)} def _snake_case ( _snake_case : int ): return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_snake_case ) ) def _snake_case ( ): return sum( number for number in range(1000 , 1000000 ) if number == digits_fifth_powers_sum(_snake_case ) ) if __name__ == "__main__": print(solution())
60
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = (IPNDMScheduler,) UpperCamelCase : int = (('''num_inference_steps''', 50),) def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int: _a : Optional[int] = {"""num_train_timesteps""": 1000} config.update(**UpperCAmelCase__ ) return config def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: _a : Optional[int] = dict(self.forward_default_kwargs ) _a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : Union[str, Any] = 0.1 * sample _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Any = dummy_past_residuals[:] if time_step is None: _a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ ) new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Optional[Any] = dummy_past_residuals[:] _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : Tuple ) -> List[str]: pass def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: _a : Optional[Any] = dict(self.forward_default_kwargs ) _a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : List[Any] = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Union[str, Any] = self.get_scheduler_config() _a : Optional[Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals (must be after setting timesteps) _a : Any = dummy_past_residuals[:] if time_step is None: _a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residual (must be after setting timesteps) _a : Optional[Any] = dummy_past_residuals[:] _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]: _a : Optional[int] = self.scheduler_classes[0] _a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) _a : int = 10 _a : List[Any] = self.dummy_model() _a : str = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _a : str = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample return sample def _lowercase ( self : int ) -> str: _a : Dict = dict(self.forward_default_kwargs ) _a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config() _a : Tuple = scheduler_class(**UpperCAmelCase__ ) _a : Tuple = self.dummy_sample _a : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ): scheduler.set_timesteps(UpperCAmelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ): _a : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] _a : Optional[Any] = dummy_past_residuals[:] _a : Optional[Any] = scheduler.timesteps[5] _a : str = scheduler.timesteps[6] _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) _a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowercase ( self : List[str] ) -> List[str]: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> List[str]: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : int ) -> List[Any]: _a : str = self.full_loop() _a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_mean.item() - 2540529 ) < 10
294
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _a = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _a = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys _a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
61
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'spiece.model'} _snake_case = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class UpperCamelCase ( snake_case_ ): def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None: _a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token _a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) _a : Optional[Any] = 3 _a : Tuple = do_lower_case _a : Tuple = remove_space _a : Tuple = keep_accents _a : Tuple = vocab_file _a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """ """See https://pypi.org/project/jieba/ for installation.""" ) _a : int = jieba _a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _lowercase ( self : Optional[Any] ) -> Any: return len(self.sp_model ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> List[str]: _a : Tuple = self.__dict__.copy() _a : Tuple = None return state def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict: _a : Tuple = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple = {} _a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict: if self.remove_space: _a : Optional[int] = """ """.join(inputs.strip().split() ) else: _a : List[Any] = inputs _a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ ) _a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] ) if self.do_lower_case: _a : Union[str, Any] = outputs.lower() return outputs def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: _a : str = self.preprocess_text(UpperCAmelCase__ ) _a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) _a : Union[str, Any] = [] for piece in pieces: if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _a : Dict = cur_pieces[1:] else: _a : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCAmelCase__ ) else: new_pieces.append(UpperCAmelCase__ ) return new_pieces def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int: return self.sp_model.PieceToId(UpperCAmelCase__ ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any: return self.sp_model.IdToPiece(UpperCAmelCase__ ) def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict: _a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip() return out_string def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Optional[Any] = [self.sep_token_id] _a : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] return ([0] * len(UpperCAmelCase__ )) + [1, 1] def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Any = [self.sep_token_id] _a : Optional[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : Union[str, Any] = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , """wb""" ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,) def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]: _a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) _a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
294
0
from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration _A = HfArgumentParser(InitializationArguments) _A = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization _A = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks _A = { 'vocab_size': len(tokenizer), 'scale_attn_by_inverse_layer_idx': True, 'reorder_and_upcast_attn': True, } # Load model config (GPT-2 large in this case) _A = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config _A = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
62
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase ( nn.Module ): def __init__( self : Union[str, Any] ) -> int: super().__init__() _a : Optional[Any] = nn.Linear(3 , 4 ) _a : Tuple = nn.BatchNormad(4 ) _a : Dict = nn.Linear(4 , 5 ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int: return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) ) class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]: return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]: return output + 1 class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Dict ) -> str: _a : List[Any] = ModelForTest() _a : str = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(test_model._hf_hook , UpperCAmelCase__ ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Dict = ModelForTest() _a : Dict = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ ) self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Dict ) -> int: _a : str = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Optional[Any] = test_model(x + 1 ) _a : str = test_model(x + 2 ) _a : Union[str, Any] = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : int = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : str = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : int = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) def _lowercase ( self : Tuple ) -> int: _a : Tuple = ModelForTest() _a : Union[str, Any] = torch.randn(2 , 3 ) _a : Optional[int] = test_model(UpperCAmelCase__ ) _a : int = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : List[Any] = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Dict = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : Any = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Optional[int] = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 ) def _lowercase ( self : Dict ) -> Optional[Any]: _a : Any = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Dict = test_model(UpperCAmelCase__ ) _a : Any = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _a : Any = True _a : Union[str, Any] = test_model(UpperCAmelCase__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _lowercase ( self : Optional[Any] ) -> str: _a : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _a : Optional[int] = torch.randn(2 , 3 ) _a : Any = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) ) _a : str = torch.randn(2 , 3 ).to(0 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(0 ) ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : int = torch.randn(2 , 3 ) _a : str = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload _a : List[str] = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Tuple = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Tuple ) -> List[str]: _a : str = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : List[Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : List[str] = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Dict ) -> str: _a : Optional[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : str = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Union[str, Any] = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Any = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
294
0
'''simple docstring''' import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values lowerCAmelCase_ : List[str] = argparse.ArgumentParser() parser.add_argument('--user', type=str, default='ubuntu') parser.add_argument('--host', type=str, default='localhost') parser.add_argument('--key_path', type=str, default=None) parser.add_argument('--instance', type=str, default='V100:1') parser.add_argument('--provider', type=str, default='cheapest') parser.add_argument('--use_spot', type=bool, default=False) parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py') lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError('Cannot specify both BYO and on-demand cluster args') lowerCAmelCase_ : List[Any] = rh.cluster( name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path} ) else: lowerCAmelCase_ : Dict = rh.cluster( name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) lowerCAmelCase_ : Optional[Any] = args.example.rsplit('/', 1)[0] # Set up remote environment cluster.install_packages(['pip:./']) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f"""pip install -r transformers/examples/{example_dir}/requirements.txt"""]) cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117']) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f"""python transformers/examples/{args.example} {' '.join(shlex.quote(arg) for arg in unknown)}"""]) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
63
"""simple docstring""" from __future__ import annotations def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' print(F"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(UpperCamelCase__ ): print(F"""{i}\t\t{d}""" ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for j in range(UpperCamelCase__ ): _a , _a , _a : List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Dict = [float("""inf""" )] * vertex_count _a : Any = 0.0 for _ in range(vertex_count - 1 ): for j in range(UpperCamelCase__ ): _a , _a , _a : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _a : Any = distance[u] + w _a : Union[str, Any] = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() _snake_case = int(input('Enter number of vertices: ').strip()) _snake_case = int(input('Enter number of edges: ').strip()) _snake_case = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) _snake_case , _snake_case , _snake_case = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) _snake_case = {'src': src, 'dst': dest, 'weight': weight} _snake_case = int(input('\nEnter shortest path source:').strip()) _snake_case = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
294
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowercase( __a , unittest.TestCase ): '''simple docstring''' lowercase__ = DiTPipeline lowercase__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowercase__ = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } lowercase__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowercase__ = False def UpperCamelCase_ ( self: Any ): '''simple docstring''' torch.manual_seed(0 ) _snake_case : Tuple = TransformeraDModel( sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=a_, activation_fn="""gelu-approximate""", num_embeds_ada_norm=1_000, norm_type="""ada_norm_zero""", norm_elementwise_affine=a_, ) _snake_case : List[Any] = AutoencoderKL() _snake_case : Any = DDIMScheduler() _snake_case : Optional[Any] = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def UpperCamelCase_ ( self: Any, a_: Dict, a_: List[Any]=0 ): '''simple docstring''' if str(a_ ).startswith("""mps""" ): _snake_case : str = torch.manual_seed(a_ ) else: _snake_case : Dict = torch.Generator(device=a_ ).manual_seed(a_ ) _snake_case : Union[str, Any] = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Tuple = """cpu""" _snake_case : str = self.get_dummy_components() _snake_case : Union[str, Any] = self.pipeline_class(**a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) _snake_case : int = self.get_dummy_inputs(a_ ) _snake_case : Any = pipe(**a_ ).images _snake_case : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 16, 16, 3) ) _snake_case : List[str] = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) _snake_case : Optional[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(a_, 1E-3 ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' self._test_inference_batch_single_identical(relax_max_difference=a_, expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", ) def UpperCamelCase_ ( self: Any ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self: str ): '''simple docstring''' _snake_case : List[Any] = torch.manual_seed(0 ) _snake_case : Dict = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) _snake_case : Optional[Any] = ["""vase""", """umbrella""", """white shark""", """white wolf"""] _snake_case : List[str] = pipe.get_label_ids(a_ ) _snake_case : List[str] = pipe(a_, generator=a_, num_inference_steps=40, output_type="""np""" ).images for word, image in zip(a_, a_ ): _snake_case : Dict = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1E-2 def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : Dict = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) _snake_case : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) _snake_case : Dict = ["""vase""", """umbrella"""] _snake_case : List[str] = pipe.get_label_ids(a_ ) _snake_case : Tuple = torch.manual_seed(0 ) _snake_case : Dict = pipe(a_, generator=a_, num_inference_steps=25, output_type="""np""" ).images for word, image in zip(a_, a_ ): _snake_case : int = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1E-1
64
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCamelCase__ = logging.get_logger(__name__) UpperCamelCase__ = '▁' UpperCamelCase__ = {'vocab_file': 'sentencepiece.bpe.model'} UpperCamelCase__ = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), } } UpperCamelCase__ = { 'facebook/mbart-large-en-ro': 1_0_2_4, 'facebook/mbart-large-cc25': 1_0_2_4, } # fmt: off UpperCamelCase__ = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class A ( UpperCAmelCase_ ): __UpperCAmelCase : str = VOCAB_FILES_NAMES __UpperCAmelCase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP __UpperCAmelCase : str = ['input_ids', 'attention_mask'] __UpperCAmelCase : List[int] = [] __UpperCAmelCase : List[int] = [] def __init__(self : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple="<s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : str="<s>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : Union[str, Any]="<pad>" , __UpperCAmelCase : Optional[int]="<mask>" , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Dict=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : Optional[Dict[str, Any]] = None , __UpperCAmelCase : Tuple=None , **__UpperCAmelCase : str , ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token UpperCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , src_lang=__UpperCAmelCase , tgt_lang=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , ) UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCAmelCase ) ) UpperCAmelCase__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token UpperCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab UpperCAmelCase__ = 1 UpperCAmelCase__ = len(self.sp_model ) UpperCAmelCase__ = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCAmelCase ) } UpperCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()} UpperCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) UpperCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} UpperCAmelCase__ = list(self.lang_code_to_id.keys() ) if additional_special_tokens is not None: # Only add those special tokens if they are not already there. self._additional_special_tokens.extend( [t for t in additional_special_tokens if t not in self._additional_special_tokens] ) UpperCAmelCase__ = src_lang if src_lang is not None else "en_XX" UpperCAmelCase__ = self.lang_code_to_id[self._src_lang] UpperCAmelCase__ = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__(self : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = self.__dict__.copy() UpperCAmelCase__ = None UpperCAmelCase__ = self.sp_model.serialized_model_proto() return state def __setstate__(self : int , __UpperCAmelCase : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCAmelCase__ = {} UpperCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) @property def lowercase_ (self : int ) -> Optional[int]: """simple docstring""" return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowercase_ (self : str ) -> str: """simple docstring""" return self._src_lang @src_lang.setter def lowercase_ (self : Any , __UpperCAmelCase : str ) -> None: """simple docstring""" UpperCAmelCase__ = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowercase_ (self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase ) UpperCAmelCase__ = [1] * len(self.prefix_tokens ) UpperCAmelCase__ = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(__UpperCAmelCase )) + ([0] * len(__UpperCAmelCase )) + suffix_ones def lowercase_ (self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowercase_ (self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] , __UpperCAmelCase : Optional[str] , **__UpperCAmelCase : int ) -> str: """simple docstring""" if src_lang is None or tgt_lang is None: raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" ) UpperCAmelCase__ = src_lang UpperCAmelCase__ = self(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors=__UpperCAmelCase , **__UpperCAmelCase ) UpperCAmelCase__ = self.convert_tokens_to_ids(__UpperCAmelCase ) UpperCAmelCase__ = tgt_lang_id return inputs def lowercase_ (self : Dict ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowercase_ (self : List[Any] , __UpperCAmelCase : str ) -> List[str]: """simple docstring""" return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase ) def lowercase_ (self : List[str] , __UpperCAmelCase : str ) -> int: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] UpperCAmelCase__ = self.sp_model.PieceToId(__UpperCAmelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowercase_ (self : str , __UpperCAmelCase : List[Any] ) -> Optional[int]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowercase_ (self : Optional[Any] , __UpperCAmelCase : Any ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = "".join(__UpperCAmelCase ).replace(__UpperCAmelCase , " " ).strip() return out_string def lowercase_ (self : str , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(__UpperCAmelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return UpperCAmelCase__ = os.path.join( __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , __UpperCAmelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCAmelCase , "wb" ) as fi: UpperCAmelCase__ = self.sp_model.serialized_model_proto() fi.write(__UpperCAmelCase ) return (out_vocab_file,) def lowercase_ (self : List[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str = "en_XX" , __UpperCAmelCase : Optional[List[str]] = None , __UpperCAmelCase : str = "ro_RO" , **__UpperCAmelCase : Union[str, Any] , ) -> BatchEncoding: """simple docstring""" UpperCAmelCase__ = src_lang UpperCAmelCase__ = tgt_lang return super().prepare_seqaseq_batch(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) def lowercase_ (self : Any ) -> List[Any]: """simple docstring""" return self.set_src_lang_special_tokens(self.src_lang ) def lowercase_ (self : Dict ) -> List[str]: """simple docstring""" return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowercase_ (self : List[Any] , __UpperCAmelCase : Optional[int] ) -> None: """simple docstring""" UpperCAmelCase__ = self.lang_code_to_id[src_lang] UpperCAmelCase__ = [] UpperCAmelCase__ = [self.eos_token_id, self.cur_lang_code] def lowercase_ (self : int , __UpperCAmelCase : str ) -> None: """simple docstring""" UpperCAmelCase__ = self.lang_code_to_id[lang] UpperCAmelCase__ = [] UpperCAmelCase__ = [self.eos_token_id, self.cur_lang_code]
65
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): '''simple docstring''' _a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: _a : Tuple = 1 - (matter_density + radiation_density + dark_energy) _a : int = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _a : List[str] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _snake_case = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
294
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : Tuple = """audio-spectrogram-transformer""" def __init__( self: Tuple , snake_case: Dict=768 , snake_case: Tuple=12 , snake_case: Tuple=12 , snake_case: str=3_072 , snake_case: List[Any]="gelu" , snake_case: int=0.0 , snake_case: List[str]=0.0 , snake_case: Optional[Any]=0.0_2 , snake_case: Tuple=1E-12 , snake_case: int=16 , snake_case: List[str]=True , snake_case: Dict=10 , snake_case: Dict=10 , snake_case: Any=1_024 , snake_case: List[str]=128 , **snake_case: List[str] , ) -> Optional[int]: super().__init__(**snake_case ) snake_case_ :Optional[Any] = hidden_size snake_case_ :int = num_hidden_layers snake_case_ :Optional[int] = num_attention_heads snake_case_ :int = intermediate_size snake_case_ :Tuple = hidden_act snake_case_ :int = hidden_dropout_prob snake_case_ :Tuple = attention_probs_dropout_prob snake_case_ :Union[str, Any] = initializer_range snake_case_ :Any = layer_norm_eps snake_case_ :str = patch_size snake_case_ :List[str] = qkv_bias snake_case_ :str = frequency_stride snake_case_ :Any = time_stride snake_case_ :Dict = max_length snake_case_ :List[Any] = num_mel_bins
66
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : List[Any] ) -> Dict: _a : Optional[int] = tempfile.mkdtemp() _a : Optional[Any] = SamImageProcessor() _a : int = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : str ) -> int: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Tuple ) -> Dict: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Dict ) -> Dict: _a : List[Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) -> Tuple: _a : Optional[Any] = self.get_image_processor() _a : int = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Union[str, Any] = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def _lowercase ( self : Optional[Any] ) -> Optional[Any]: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = [torch.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : Optional[int] = [[683, 1024]] _a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : int = processor.post_process_masks( UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : Optional[Any] = [np.ones((1, 3, 5, 5) )] _a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : List[str] = [[1, 0], [0, 1]] with self.assertRaises(UpperCAmelCase__ ): _a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) @require_vision @require_tf class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Any ) -> List[str]: _a : List[str] = tempfile.mkdtemp() _a : Any = SamImageProcessor() _a : Union[str, Any] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Dict ) -> List[str]: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: _a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> str: _a : Union[str, Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : int = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def _lowercase ( self : Optional[Any] ) -> int: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Any = [tf.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : str = [[683, 1024]] _a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Union[str, Any] = processor.post_process_masks( UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : List[Any] = [np.ones((1, 3, 5, 5) )] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Dict = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): _a : List[Any] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : str ) -> Optional[Any]: _a : Optional[Any] = tempfile.mkdtemp() _a : Dict = SamImageProcessor() _a : List[str] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Tuple ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : str ) -> int: _a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _lowercase ( self : int ) -> List[Any]: _a : Optional[Any] = self.get_image_processor() _a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) _a : str = [tf.convert_to_tensor(UpperCAmelCase__ )] _a : Optional[int] = [torch.tensor(UpperCAmelCase__ )] _a : Union[str, Any] = [[1764, 2646]] _a : List[str] = [[683, 1024]] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) _a : List[str] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _lowercase ( self : str ) -> Optional[Any]: _a : List[Any] = self.get_image_processor() _a : Any = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Dict = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() _a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
294
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __UpperCAmelCase ={ "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =["MobileViTFeatureExtractor"] __UpperCAmelCase =["MobileViTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =[ "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileViTForImageClassification", "MobileViTForSemanticSegmentation", "MobileViTModel", "MobileViTPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCAmelCase =[ "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileViTForImageClassification", "TFMobileViTForSemanticSegmentation", "TFMobileViTModel", "TFMobileViTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
67
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _snake_case = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } _snake_case = { '169M': 768, '430M': 1024, '1B5': 2048, '3B': 2560, '7B': 4096, '14B': 5120, } def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : int = list(state_dict.keys() ) for name in state_dict_keys: _a : str = state_dict.pop(UpperCamelCase__ ) # emb -> embedding if name.startswith("""emb.""" ): _a : Dict = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): _a : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention _a : Any = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCamelCase__ ) # ffn -> feed_forward _a : int = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCamelCase__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): _a : List[str] = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): _a : Tuple = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): _a : Dict = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": _a : Optional[int] = """rwkv.""" + name _a : Any = weight return state_dict def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=None ): '''simple docstring''' # 1. If possible, build the tokenizer. if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) _a : Tuple = 5_0_2_7_7 _a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: _a : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase__ ) _a : int = len(UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) # 2. Build the config _a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: _a : Tuple = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) _a : List[Any] = RwkvConfig( vocab_size=UpperCamelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(UpperCamelCase__ ) # 3. Download model file then convert state_dict _a : str = hf_hub_download(UpperCamelCase__ , UpperCamelCase__ ) _a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : List[str] = convert_state_dict(UpperCamelCase__ ) # 4. Split in shards and save _a , _a : List[str] = shard_checkpoint(UpperCamelCase__ ) for shard_file, shard in shards.items(): torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if index is not None: _a : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) # Save the index as well with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f: _a : Dict = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n""" f.write(UpperCamelCase__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" ) _a : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: _a : Any = torch.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) _a : Dict = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ , max_shard_size="""2GB""" ) tokenizer.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) _snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
294
0
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class a__ ( snake_case ): """simple docstring""" __lowerCamelCase = 42 __lowerCamelCase = None def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Any , SCREAMING_SNAKE_CASE_: Optional[Any]=0.999 , SCREAMING_SNAKE_CASE_: str="cosine" , ) -> Optional[int]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(SCREAMING_SNAKE_CASE_: Optional[int] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(SCREAMING_SNAKE_CASE_: List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) A__ = [] for i in range(SCREAMING_SNAKE_CASE_ ): A__ = i / num_diffusion_timesteps A__ = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) / alpha_bar_fn(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) return torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.floataa ) class a__ ( snake_case , snake_case ): """simple docstring""" __lowerCamelCase = 1 @register_to_config def __init__( self , lowercase = 1000 , lowercase = 0.0001 , lowercase = 0.02 , lowercase = "linear" , lowercase = None , lowercase = True , lowercase = True , lowercase = 0 , lowercase = "epsilon" , lowercase = 1.0 , **lowercase , ) -> Union[str, Any]: '''simple docstring''' if kwargs.get("set_alpha_to_one" , lowercase ) is not None: A__ = ( "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead." ) deprecate("set_alpha_to_one" , "1.0.0" , lowercase , standard_warn=lowercase ) A__ = kwargs["set_alpha_to_one"] if trained_betas is not None: A__ = torch.tensor(lowercase , dtype=torch.floataa ) elif beta_schedule == "linear": A__ = torch.linspace(lowercase , lowercase , lowercase , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. A__ = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowercase , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule A__ = betas_for_alpha_bar(lowercase ) else: raise NotImplementedError(F'{beta_schedule} does is not implemented for {self.__class__}' ) A__ = 1.0 - self.betas A__ = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. A__ = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution A__ = 1.0 # setable values A__ = None A__ = torch.from_numpy(np.arange(0 , lowercase ).copy().astype(np.intaa ) ) def UpperCamelCase ( self , lowercase , lowercase = None ) -> torch.FloatTensor: '''simple docstring''' return sample def UpperCamelCase ( self , lowercase , lowercase = None ) -> Optional[int]: '''simple docstring''' if num_inference_steps > self.config.num_train_timesteps: raise ValueError( F'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:' F' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle' F' maximal {self.config.num_train_timesteps} timesteps.' ) A__ = num_inference_steps A__ = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 A__ = (np.arange(0 , lowercase ) * step_ratio).round().copy().astype(np.intaa ) A__ = torch.from_numpy(lowercase ).to(lowercase ) self.timesteps += self.config.steps_offset def UpperCamelCase ( self , lowercase , lowercase , lowercase , lowercase = 0.0 , lowercase = False , lowercase = None , lowercase = True , ) -> Union[DDIMSchedulerOutput, Tuple]: '''simple docstring''' A__ = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process A__ = self.alphas_cumprod[timestep] A__ = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) A__ = 1 - alpha_prod_t # 3. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": A__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 A__ = model_output elif self.config.prediction_type == "sample": A__ = model_output A__ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": A__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output A__ = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or' " `v_prediction`" ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: A__ = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A__ = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf A__ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=lowercase , pred_original_sample=lowercase ) def __len__( self ) -> Optional[Any]: '''simple docstring''' return self.config.num_train_timesteps
68
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> List[str]: _a : Any = """laion/clap-htsat-unfused""" _a : Union[str, Any] = tempfile.mkdtemp() def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict: return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : List[str] ) -> Optional[int]: _a : List[str] = self.get_tokenizer() _a : Any = self.get_feature_extractor() _a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) _a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> Optional[int]: _a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> Optional[Any]: _a : Optional[int] = self.get_feature_extractor() _a : Tuple = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = floats_list((3, 1000) ) _a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Tuple ) -> Optional[int]: _a : List[str] = self.get_feature_extractor() _a : Any = self.get_tokenizer() _a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Optional[int] = """This is a test string""" _a : Tuple = processor(text=UpperCAmelCase__ ) _a : int = tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : List[Any] ) -> Any: _a : str = self.get_feature_extractor() _a : List[str] = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : Dict = processor.batch_decode(UpperCAmelCase__ ) _a : Any = tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> List[str]: _a : str = self.get_feature_extractor() _a : Optional[Any] = self.get_tokenizer() _a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
294
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "sew" def __init__( self, lowerCAmelCase__=32, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__=2, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__="group", lowerCAmelCase__="gelu", lowerCAmelCase__=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), lowerCAmelCase__=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), lowerCAmelCase__=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), lowerCAmelCase__=False, lowerCAmelCase__=128, lowerCAmelCase__=16, lowerCAmelCase__=True, lowerCAmelCase__=0.05, lowerCAmelCase__=10, lowerCAmelCase__=2, lowerCAmelCase__=0.0, lowerCAmelCase__=10, lowerCAmelCase__=0, lowerCAmelCase__="mean", lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=256, lowerCAmelCase__=0, lowerCAmelCase__=1, lowerCAmelCase__=2, **lowerCAmelCase__, ) -> str: super().__init__(**lowerCAmelCase__, pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__) snake_case_ = hidden_size snake_case_ = feat_extract_norm snake_case_ = feat_extract_activation snake_case_ = list(lowerCAmelCase__) snake_case_ = list(lowerCAmelCase__) snake_case_ = list(lowerCAmelCase__) snake_case_ = conv_bias snake_case_ = num_conv_pos_embeddings snake_case_ = num_conv_pos_embedding_groups snake_case_ = len(self.conv_dim) snake_case_ = num_hidden_layers snake_case_ = intermediate_size snake_case_ = squeeze_factor snake_case_ = hidden_act snake_case_ = num_attention_heads snake_case_ = hidden_dropout snake_case_ = attention_dropout snake_case_ = activation_dropout snake_case_ = feat_proj_dropout snake_case_ = final_dropout snake_case_ = layerdrop snake_case_ = layer_norm_eps snake_case_ = initializer_range snake_case_ = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( 'Configuration for convolutional layers is incorrect.' 'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,' f'but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)' f'= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case_ = apply_spec_augment snake_case_ = mask_time_prob snake_case_ = mask_time_length snake_case_ = mask_time_min_masks snake_case_ = mask_feature_prob snake_case_ = mask_feature_length snake_case_ = mask_feature_min_masks # ctc loss snake_case_ = ctc_loss_reduction snake_case_ = ctc_zero_infinity # sequence classification snake_case_ = use_weighted_layer_sum snake_case_ = classifier_proj_size @property def a_ ( self) -> Optional[Any]: return functools.reduce(operator.mul, self.conv_stride, 1)
69
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _snake_case = logging.get_logger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
294
0
'''simple docstring''' from dataclasses import dataclass, field from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import pyarrow as pa if TYPE_CHECKING: from .features import FeatureType @dataclass class UpperCAmelCase : _lowercase: List[str] _lowercase: Optional[str] = None # Automatically constructed _lowercase: ClassVar[str] = "dict" _lowercase: ClassVar[Any] = None _lowercase: str = field(default='''Translation''' , init=snake_case_ , repr=snake_case_ ) def __call__( self : Optional[int] ) -> Optional[int]: return pa.struct({lang: pa.string() for lang in sorted(self.languages )} ) def lowercase__ ( self : Union[str, Any] ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value return {k: Value("""string""" ) for k in sorted(self.languages )} @dataclass class UpperCAmelCase : _lowercase: Optional[List] = None _lowercase: Optional[int] = None _lowercase: Optional[str] = None # Automatically constructed _lowercase: ClassVar[str] = "dict" _lowercase: ClassVar[Any] = None _lowercase: str = field(default='''TranslationVariableLanguages''' , init=snake_case_ , repr=snake_case_ ) def lowercase__ ( self : Any ) -> Optional[Any]: _lowerCAmelCase = sorted(set(self.languages ) ) if self.languages else None _lowerCAmelCase = len(self.languages ) if self.languages else None def __call__( self : List[str] ) -> Optional[Any]: return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} ) def lowercase__ ( self : Optional[Any] , __snake_case : Tuple ) -> Any: _lowerCAmelCase = set(self.languages ) if self.languages and set(__snake_case ) - lang_set: raise ValueError( f"Some languages in example ({', '.join(sorted(set(__snake_case ) - lang_set ) )}) are not in valid set ({', '.join(__snake_case )})." ) # Convert dictionary into tuples, splitting out cases where there are # multiple translations for a single language. _lowerCAmelCase = [] for lang, text in translation_dict.items(): if isinstance(__snake_case , __snake_case ): translation_tuples.append((lang, text) ) else: translation_tuples.extend([(lang, el) for el in text] ) # Ensure translations are in ascending order by language code. _lowerCAmelCase , _lowerCAmelCase = zip(*sorted(__snake_case ) ) return {"language": languages, "translation": translations} def lowercase__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Sequence, Value return { "language": Sequence(Value("""string""" ) ), "translation": Sequence(Value("""string""" ) ), }
70
"""simple docstring""" import unittest import numpy as np def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ): '''simple docstring''' _a : List[Any] = np.shape(UpperCamelCase__ ) _a : Any = np.shape(UpperCamelCase__ ) _a : Union[str, Any] = np.shape(UpperCamelCase__ ) if shape_a[0] != shape_b[0]: _a : int = ( """Expected the same number of rows for A and B. """ F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(UpperCamelCase__ ) if shape_b[1] != shape_c[1]: _a : Tuple = ( """Expected the same number of columns for B and C. """ F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(UpperCamelCase__ ) _a : int = pseudo_inv if a_inv is None: try: _a : Optional[int] = np.linalg.inv(UpperCamelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> None: _a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Optional[int] = np.array([[2, 1], [6, 3]] ) _a : Optional[Any] = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) _a : Union[str, Any] = np.block([[a, b], [b.T, c]] ) _a : int = np.linalg.det(UpperCAmelCase__ ) _a : Union[str, Any] = np.linalg.det(UpperCAmelCase__ ) _a : List[Any] = np.linalg.det(UpperCAmelCase__ ) self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s ) def _lowercase ( self : int ) -> None: _a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> None: _a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Dict = np.array([[0, 3], [3, 0], [2, 3]] ) _a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
294
0
import unittest from transformers.utils.backbone_utils import ( BackboneMixin, get_aligned_output_features_output_indices, verify_out_features_out_indices, ) class __A ( unittest.TestCase ): """simple docstring""" def __lowercase ( self ): """simple docstring""" __UpperCamelCase : List[str] =['a', 'b', 'c'] # Defaults to last layer if both are None __UpperCamelCase , __UpperCamelCase : List[str] =get_aligned_output_features_output_indices(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , ['c'] ) self.assertEqual(lowerCamelCase__ , [2] ) # Out indices set to match out features __UpperCamelCase , __UpperCamelCase : List[Any] =get_aligned_output_features_output_indices(['a', 'c'] , lowerCamelCase__ , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , ['a', 'c'] ) self.assertEqual(lowerCamelCase__ , [0, 2] ) # Out features set to match out indices __UpperCamelCase , __UpperCamelCase : int =get_aligned_output_features_output_indices(lowerCamelCase__ , [0, 2] , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , ['a', 'c'] ) self.assertEqual(lowerCamelCase__ , [0, 2] ) # Out features selected from negative indices __UpperCamelCase , __UpperCamelCase : Dict =get_aligned_output_features_output_indices(lowerCamelCase__ , [-3, -1] , lowerCamelCase__ ) self.assertEqual(lowerCamelCase__ , ['a', 'c'] ) self.assertEqual(lowerCamelCase__ , [-3, -1] ) def __lowercase ( self ): """simple docstring""" with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , lowerCamelCase__ ) # Out features must be a list with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] ) # Out features must be a subset of stage names with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] ) # Out indices must be a list or tuple with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(lowerCamelCase__ , 0 , ['a', 'b'] ) # Out indices must be a subset of stage names with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(lowerCamelCase__ , (0, 1) , ['a'] ) # Out features and out indices must be the same length with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] ) # Out features should match out indices with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] ) # Out features and out indices should be in order with self.assertRaises(lowerCamelCase__ ): verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] ) # Check passes with valid inputs verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] ) def __lowercase ( self ): """simple docstring""" __UpperCamelCase : int =BackboneMixin() __UpperCamelCase : Optional[Any] =['a', 'b', 'c'] __UpperCamelCase : Dict =['a', 'c'] __UpperCamelCase : Union[str, Any] =[0, 2] # Check that the output features and indices are set correctly self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [0, 2] ) # Check out features and indices are updated correctly __UpperCamelCase : Tuple =['a', 'b'] self.assertEqual(backbone.out_features , ['a', 'b'] ) self.assertEqual(backbone.out_indices , [0, 1] ) __UpperCamelCase : Tuple =[-3, -1] self.assertEqual(backbone.out_features , ['a', 'c'] ) self.assertEqual(backbone.out_indices , [-3, -1] )
71
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = torch.device('cpu') def lowerCAmelCase__ ( ): '''simple docstring''' _a : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" _a : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Any = dct.pop(UpperCamelCase__ ) _a : Dict = val def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : Tuple = [] for k in state_dict.keys(): _a : Any = k if ".pwconv" in k: _a : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _a : List[str] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: _a : Optional[int] = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _a : Tuple = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _a : int = k_new.split(""".""" ) if ls[2].isdigit(): _a : Union[str, Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _a : Tuple = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _a : Optional[int] = 1_0_0_0 _a : Optional[Any] = """huggingface/label-files""" _a : Optional[Any] = """imagenet-1k-id2label.json""" _a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) _a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _a : Dict = idalabel _a : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _a : Any = [3, 3, 6, 4] _a : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": _a : Any = [3, 3, 9, 6] _a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": _a : List[Any] = [4, 3, 1_0, 5] _a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": _a : List[Any] = [4, 4, 1_2, 6] _a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ ) else: _a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : int = checkpoint _a : Optional[Any] = create_rename_keys(UpperCamelCase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # load HuggingFace model _a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval() hf_model.load_state_dict(UpperCamelCase__ ) # prepare test inputs _a : Any = prepare_img() _a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" ) # compare outputs from both models _a : Dict = get_expected_output(UpperCamelCase__ ) _a : int = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') _snake_case = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
294
0
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder lowerCAmelCase__ = '''base_with_context''' def snake_case_ ( A_ : Optional[Any], A_ : str ): '''simple docstring''' _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : Union[str, Any] = weights[F'''layers_{lyr_num}'''] _lowerCamelCase : Optional[Any] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Optional[int] = ly_weight['''attention'''] _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _lowerCamelCase : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def snake_case_ ( A_ : int, A_ : List[Any] ): '''simple docstring''' _lowerCamelCase : str = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) _lowerCamelCase : List[Any] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ ) for lyr_num, lyr in enumerate(model.encoders ): _lowerCamelCase : int = weights[F'''layers_{lyr_num}'''] _lowerCamelCase : List[str] = ly_weight['''attention'''] _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def snake_case_ ( A_ : Dict, A_ : str ): '''simple docstring''' _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ), requires_grad=A_ ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _lowerCamelCase : Dict = weights[F'''layers_{lyr_num}'''] _lowerCamelCase : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : str = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = ly_weight['''self_attention'''] _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = ly_weight['''MultiHeadDotProductAttention_0'''] _lowerCamelCase : Any = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) _lowerCamelCase : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _lowerCamelCase : int = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) _lowerCamelCase : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _lowerCamelCase : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _lowerCamelCase : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _lowerCamelCase : int = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) _lowerCamelCase : Dict = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def snake_case_ ( A_ : int ): '''simple docstring''' _lowerCamelCase : Any = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _lowerCamelCase : Optional[int] = jnp.tree_util.tree_map(onp.array, A_ ) _lowerCamelCase : Dict = [ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] _lowerCamelCase : List[Any] = os.path.join(args.checkpoint_path, '''..''', '''config.gin''' ) _lowerCamelCase : Tuple = inference.parse_training_gin_file(A_, A_ ) _lowerCamelCase : Optional[Any] = inference.InferenceModel(args.checkpoint_path, A_ ) _lowerCamelCase : List[str] = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''', variance_type='''fixed_large''' ) _lowerCamelCase : List[Any] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', ) _lowerCamelCase : List[str] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length['''targets_context'''], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj='''gated-gelu''', ) _lowerCamelCase : Dict = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length['''targets_context'''], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, ) _lowerCamelCase : Optional[Any] = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''], A_ ) _lowerCamelCase : Any = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''], A_ ) _lowerCamelCase : int = load_decoder(ta_checkpoint['''target''']['''decoder'''], A_ ) _lowerCamelCase : str = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) _lowerCamelCase : Optional[int] = SpectrogramDiffusionPipeline( notes_encoder=A_, continuous_encoder=A_, decoder=A_, scheduler=A_, melgan=A_, ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": lowerCAmelCase__ = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=F"""{MODEL}/checkpoint_500000""", type=str, required=False, help='''Path to the original jax model checkpoint.''', ) lowerCAmelCase__ = parser.parse_args() main(args)
72
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['PerceiverFeatureExtractor'] _snake_case = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
from ..utils import DummyObject, requires_backends class A_ ( metaclass=SCREAMING_SNAKE_CASE ): _UpperCAmelCase : List[str] = ['''keras_nlp'''] def __init__( self : Tuple ,*SCREAMING_SNAKE_CASE__ : Any ,**SCREAMING_SNAKE_CASE__ : List[str]): requires_backends(self ,['keras_nlp'])
73
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict: if k in (0.0_4, 0.0_6): _a : List[str] = k _a : List[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Dict ) -> str: return str(self.k ) def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]: _a : Dict = cva.imread(UpperCAmelCase__ , 0 ) _a , _a : List[Any] = img.shape _a : list[list[int]] = [] _a : List[Any] = img.copy() _a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB ) _a , _a : Any = np.gradient(UpperCAmelCase__ ) _a : Tuple = dx**2 _a : Union[str, Any] = dy**2 _a : Union[str, Any] = dx * dy _a : int = 0.0_4 _a : List[str] = self.window_size // 2 for y in range(UpperCAmelCase__ , h - offset ): for x in range(UpperCAmelCase__ , w - offset ): _a : str = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : List[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Tuple = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Any = (wxx * wyy) - (wxy**2) _a : Tuple = wxx + wyy _a : Any = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _snake_case = HarrisCorner(0.04, 3) _snake_case , _snake_case = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
294
0
"""simple docstring""" from __future__ import annotations from math import pow, sqrt def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ): if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance == 0: return {"resistance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(snake_case__ , 2 ) - pow(snake_case__ , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(snake_case__ , 2 ) + pow(snake_case__ , 2 ) )} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
74
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' # Check if the input is valid if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients _a , _a , _a : Any = equationa _a , _a , _a : Tuple = equationa # Calculate the determinants of the matrices _a : int = aa * ba - aa * ba _a : str = ca * ba - ca * ba _a : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _a : Dict = determinant_x / determinant _a : str = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
294
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING a_ : int = logging.get_logger(__name__) a_ : Tuple = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class __UpperCamelCase ( lowerCamelCase__ ): lowercase : int ='table-transformer' lowercase : Union[str, Any] =['past_key_values'] lowercase : str ={ 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self, lowerCAmelCase=True, lowerCAmelCase=None, lowerCAmelCase=3, lowerCAmelCase=100, lowerCAmelCase=6, lowerCAmelCase=2_048, lowerCAmelCase=8, lowerCAmelCase=6, lowerCAmelCase=2_048, lowerCAmelCase=8, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=True, lowerCAmelCase="relu", lowerCAmelCase=256, lowerCAmelCase=0.1, lowerCAmelCase=0.0, lowerCAmelCase=0.0, lowerCAmelCase=0.0_2, lowerCAmelCase=1.0, lowerCAmelCase=False, lowerCAmelCase="sine", lowerCAmelCase="resnet50", lowerCAmelCase=True, lowerCAmelCase=False, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=1, lowerCAmelCase=1, lowerCAmelCase=5, lowerCAmelCase=2, lowerCAmelCase=0.1, **lowerCAmelCase, ): """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCamelCase_ =CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(lowerCAmelCase, lowerCAmelCase ): lowerCamelCase_ =backbone_config.get('''model_type''' ) lowerCamelCase_ =CONFIG_MAPPING[backbone_model_type] lowerCamelCase_ =config_class.from_dict(lowerCAmelCase ) # set timm attributes to None lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =None, None, None lowerCamelCase_ =use_timm_backbone lowerCamelCase_ =backbone_config lowerCamelCase_ =num_channels lowerCamelCase_ =num_queries lowerCamelCase_ =d_model lowerCamelCase_ =encoder_ffn_dim lowerCamelCase_ =encoder_layers lowerCamelCase_ =encoder_attention_heads lowerCamelCase_ =decoder_ffn_dim lowerCamelCase_ =decoder_layers lowerCamelCase_ =decoder_attention_heads lowerCamelCase_ =dropout lowerCamelCase_ =attention_dropout lowerCamelCase_ =activation_dropout lowerCamelCase_ =activation_function lowerCamelCase_ =init_std lowerCamelCase_ =init_xavier_std lowerCamelCase_ =encoder_layerdrop lowerCamelCase_ =decoder_layerdrop lowerCamelCase_ =encoder_layers lowerCamelCase_ =auxiliary_loss lowerCamelCase_ =position_embedding_type lowerCamelCase_ =backbone lowerCamelCase_ =use_pretrained_backbone lowerCamelCase_ =dilation # Hungarian matcher lowerCamelCase_ =class_cost lowerCamelCase_ =bbox_cost lowerCamelCase_ =giou_cost # Loss coefficients lowerCamelCase_ =mask_loss_coefficient lowerCamelCase_ =dice_loss_coefficient lowerCamelCase_ =bbox_loss_coefficient lowerCamelCase_ =giou_loss_coefficient lowerCamelCase_ =eos_coefficient super().__init__(is_encoder_decoder=lowerCAmelCase, **lowerCAmelCase ) @property def lowercase__ ( self ): """simple docstring""" return self.encoder_attention_heads @property def lowercase__ ( self ): """simple docstring""" return self.d_model class __UpperCamelCase ( lowerCamelCase__ ): lowercase : Tuple =version.parse('1.11' ) @property def lowercase__ ( self ): """simple docstring""" return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def lowercase__ ( self ): """simple docstring""" return 1e-5 @property def lowercase__ ( self ): """simple docstring""" return 12
75
"""simple docstring""" _snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}] _snake_case = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
294
0
import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPTaConfig, GPTaLMHeadModel, GPTaTokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed a_ = { 'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), 'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), 'bert': (BertConfig, BertForMaskedLM, BertTokenizer), 'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer), } def lowerCamelCase__ ( _a): assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def lowerCamelCase__ ( _a , _a): if args.student_type == "roberta": SCREAMING_SNAKE_CASE : Any = False elif args.student_type == "gpt2": SCREAMING_SNAKE_CASE : Dict = False def lowerCamelCase__ ( _a , _a): if args.student_type == "roberta": SCREAMING_SNAKE_CASE : int = False def lowerCamelCase__ ( ): SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser(description="Training") parser.add_argument("--force" , action="store_true" , help="Overwrite dump_path if it already exists.") parser.add_argument( "--dump_path" , type=_a , required=_a , help="The output directory (log, checkpoints, parameters, etc.)") parser.add_argument( "--data_file" , type=_a , required=_a , help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence." , ) parser.add_argument( "--student_type" , type=_a , choices=["distilbert", "roberta", "gpt2"] , required=_a , help="The student type (DistilBERT, RoBERTa)." , ) parser.add_argument("--student_config" , type=_a , required=_a , help="Path to the student configuration.") parser.add_argument( "--student_pretrained_weights" , default=_a , type=_a , help="Load student initialization checkpoint.") parser.add_argument( "--teacher_type" , choices=["bert", "roberta", "gpt2"] , required=_a , help="Teacher type (BERT, RoBERTa).") parser.add_argument("--teacher_name" , type=_a , required=_a , help="The teacher model.") parser.add_argument("--temperature" , default=2.0 , type=_a , help="Temperature for the softmax temperature.") parser.add_argument( "--alpha_ce" , default=0.5 , type=_a , help="Linear weight for the distillation loss. Must be >=0.") parser.add_argument( "--alpha_mlm" , default=0.0 , type=_a , help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag." , ) parser.add_argument("--alpha_clm" , default=0.5 , type=_a , help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse" , default=0.0 , type=_a , help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument( "--alpha_cos" , default=0.0 , type=_a , help="Linear weight of the cosine embedding loss. Must be >=0.") parser.add_argument( "--mlm" , action="store_true" , help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.") parser.add_argument( "--mlm_mask_prop" , default=0.15 , type=_a , help="Proportion of tokens for which we need to make a prediction." , ) parser.add_argument("--word_mask" , default=0.8 , type=_a , help="Proportion of tokens to mask out.") parser.add_argument("--word_keep" , default=0.1 , type=_a , help="Proportion of tokens to keep.") parser.add_argument("--word_rand" , default=0.1 , type=_a , help="Proportion of tokens to randomly replace.") parser.add_argument( "--mlm_smoothing" , default=0.7 , type=_a , help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec)." , ) parser.add_argument("--token_counts" , type=_a , help="The token counts in the data_file for MLM.") parser.add_argument( "--restrict_ce_to_mask" , action="store_true" , help="If true, compute the distillation loss only the [MLM] prediction distribution." , ) parser.add_argument( "--freeze_pos_embs" , action="store_true" , help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only." , ) parser.add_argument( "--freeze_token_type_embds" , action="store_true" , help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only." , ) parser.add_argument("--n_epoch" , type=_a , default=3 , help="Number of pass on the whole dataset.") parser.add_argument("--batch_size" , type=_a , default=5 , help="Batch size (for each process).") parser.add_argument( "--group_by_size" , action="store_false" , help="If true, group sequences that have similar length into the same batch. Default is true." , ) parser.add_argument( "--gradient_accumulation_steps" , type=_a , default=50 , help="Gradient accumulation for larger training batches." , ) parser.add_argument("--warmup_prop" , default=0.05 , type=_a , help="Linear warmup proportion.") parser.add_argument("--weight_decay" , default=0.0 , type=_a , help="Weight decay if we apply some.") parser.add_argument("--learning_rate" , default=5E-4 , type=_a , help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon" , default=1E-6 , type=_a , help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm" , default=5.0 , type=_a , help="Max gradient norm.") parser.add_argument("--initializer_range" , default=0.02 , type=_a , help="Random initialization range.") parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_a , default="O1" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_gpu" , type=_a , default=1 , help="Number of GPUs in the node.") parser.add_argument("--local_rank" , type=_a , default=-1 , help="Distributed training - Local rank") parser.add_argument("--seed" , type=_a , default=56 , help="Random seed") parser.add_argument("--log_interval" , type=_a , default=500 , help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval" , type=_a , default=4000 , help="Checkpoint interval.") SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args() sanity_checks(_a) # ARGS # init_gpu_params(_a) set_seed(_a) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite" " itUse `--force` if you want to overwrite it") else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f"Experiment will be dumped and logged in {args.dump_path}") # SAVE PARAMS # logger.info(f"Param: {args}") with open(os.path.join(args.dump_path , "parameters.json") , "w") as f: json.dump(vars(_a) , _a , indent=4) git_log(args.dump_path) SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : List[Any] = MODEL_CLASSES[args.student_type] SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Union[str, Any] = MODEL_CLASSES[args.teacher_type] # TOKENIZER # SCREAMING_SNAKE_CASE : str = teacher_tokenizer_class.from_pretrained(args.teacher_name) SCREAMING_SNAKE_CASE : Any = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): SCREAMING_SNAKE_CASE : Dict = tokenizer.all_special_tokens.index(_a) SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}") SCREAMING_SNAKE_CASE : Optional[Any] = special_tok_ids SCREAMING_SNAKE_CASE : List[str] = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}") with open(args.data_file , "rb") as fp: SCREAMING_SNAKE_CASE : str = pickle.load(_a) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)") with open(args.token_counts , "rb") as fp: SCREAMING_SNAKE_CASE : Any = pickle.load(_a) SCREAMING_SNAKE_CASE : List[Any] = np.maximum(_a , 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): SCREAMING_SNAKE_CASE : Union[str, Any] = 0.0 # do not predict special tokens SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(_a) else: SCREAMING_SNAKE_CASE : Optional[int] = None SCREAMING_SNAKE_CASE : Optional[int] = LmSeqsDataset(params=_a , data=_a) logger.info("Data loader created.") # STUDENT # logger.info(f"Loading student config from {args.student_config}") SCREAMING_SNAKE_CASE : List[str] = student_config_class.from_pretrained(args.student_config) SCREAMING_SNAKE_CASE : List[str] = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}") SCREAMING_SNAKE_CASE : str = student_model_class.from_pretrained(args.student_pretrained_weights , config=_a) else: SCREAMING_SNAKE_CASE : Optional[Any] = student_model_class(_a) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}") logger.info("Student loaded.") # TEACHER # SCREAMING_SNAKE_CASE : Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=_a) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}") logger.info(f"Teacher loaded from {args.teacher_name}.") # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(_a , _a) if args.freeze_token_type_embds: freeze_token_type_embeddings(_a , _a) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() SCREAMING_SNAKE_CASE : List[str] = Distiller( params=_a , dataset=_a , token_probs=_a , student=_a , teacher=_a) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
76
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = '''mvp''' UpperCamelCase : Union[str, Any] = ['''past_key_values'''] UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]: _a : Any = vocab_size _a : Any = max_position_embeddings _a : Union[str, Any] = d_model _a : List[str] = encoder_ffn_dim _a : List[Any] = encoder_layers _a : Dict = encoder_attention_heads _a : Tuple = decoder_ffn_dim _a : List[Any] = decoder_layers _a : Optional[Any] = decoder_attention_heads _a : Optional[Any] = dropout _a : str = attention_dropout _a : Dict = activation_dropout _a : Any = activation_function _a : Tuple = init_std _a : Dict = encoder_layerdrop _a : Optional[int] = decoder_layerdrop _a : Optional[Any] = classifier_dropout _a : List[Any] = use_cache _a : Dict = encoder_layers _a : str = scale_embedding # scale factor will be sqrt(d_model) if True _a : int = use_prompt _a : Dict = prompt_length _a : Dict = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ): _a : List[str] = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ """The config can simply be saved and uploaded again to be fixed.""" )
294
0
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin _UpperCamelCase : int = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece @require_tokenizers class UpperCAmelCase_ ( _a , unittest.TestCase): lowerCamelCase__ : Optional[int] = ReformerTokenizer lowerCamelCase__ : Tuple = ReformerTokenizerFast lowerCamelCase__ : Optional[int] = True lowerCamelCase__ : List[Any] = False lowerCamelCase__ : Union[str, Any] = True def _UpperCAmelCase ( self ) -> List[Any]: super().setUp() lowercase__ : Union[str, Any] = ReformerTokenizer(a , keep_accents=a ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : Optional[Any] = '<s>' lowercase__ : Any = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a ) def _UpperCAmelCase ( self ) -> Tuple: lowercase__ : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<unk>' ) self.assertEqual(vocab_keys[1] , '<s>' ) self.assertEqual(vocab_keys[-1] , 'j' ) self.assertEqual(len(a ) , 1_0_0_0 ) def _UpperCAmelCase ( self ) -> Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def _UpperCAmelCase ( self ) -> int: if not self.test_rust_tokenizer: return lowercase__ : List[Any] = self.get_tokenizer() lowercase__ : Dict = self.get_rust_tokenizer() lowercase__ : List[Any] = 'I was born in 92000, and this is falsé.' lowercase__ : Dict = tokenizer.tokenize(a ) lowercase__ : Tuple = rust_tokenizer.tokenize(a ) self.assertListEqual(a , a ) lowercase__ : str = tokenizer.encode(a , add_special_tokens=a ) lowercase__ : Tuple = rust_tokenizer.encode(a , add_special_tokens=a ) self.assertListEqual(a , a ) lowercase__ : Any = self.get_rust_tokenizer() lowercase__ : Optional[int] = tokenizer.encode(a ) lowercase__ : Tuple = rust_tokenizer.encode(a ) self.assertListEqual(a , a ) def _UpperCAmelCase ( self , a=1_5 ) -> str: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ): lowercase__ : Tuple = self.rust_tokenizer_class.from_pretrained(a , **a ) # Simple input lowercase__ : Tuple = 'This is a simple input' lowercase__ : List[str] = ['This is a simple input 1', 'This is a simple input 2'] lowercase__ : Optional[int] = ('This is a simple input', 'This is a pair') lowercase__ : List[Any] = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' ) # Simple input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' ) # Simple input self.assertRaises( a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , ) # Pair input self.assertRaises(a , tokenizer_r.encode , a , max_length=a , padding='max_length' ) # Pair input self.assertRaises(a , tokenizer_r.encode_plus , a , max_length=a , padding='max_length' ) # Pair input self.assertRaises( a , tokenizer_r.batch_encode_plus , a , max_length=a , padding='max_length' , ) def _UpperCAmelCase ( self ) -> Optional[int]: pass def _UpperCAmelCase ( self ) -> List[Any]: lowercase__ : Union[str, Any] = ReformerTokenizer(a , keep_accents=a ) lowercase__ : str = tokenizer.tokenize('This is a test' ) self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(a ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] , ) lowercase__ : int = tokenizer.tokenize('I was born in 92000, and this is falsé.' ) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ] , ) lowercase__ : int = tokenizer.convert_tokens_to_ids(a ) self.assertListEqual( a , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] , ) lowercase__ : List[str] = tokenizer.convert_ids_to_tokens(a ) self.assertListEqual( a , [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ] , ) @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: lowercase__ : Union[str, Any] = 'Hello World!' lowercase__ : Optional[int] = [1_2_6, 3_2, 2_6_2, 1_5_2, 3_8, 7_2, 2_8_7] self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: lowercase__ : List[str] = ( 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will' ' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' ) lowercase__ : int = [ 1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 3_5, 2_8, 2_7_5, 3, 2_5_9, 2_9_7, 2_6_0, 8_4, 4, 3_5, 1_1_0, 4_4, 8, 2_5_9, 9_1, 2_6_8, 2_1, 1_1, 2_0_9, 2_7_4, 1_0_9, 2_6_6, 2_7_7, 1_1_7, 8_6, 9_3, 3_1_5, 2_5_8, 2_7_8, 2_5_8, 2_7_7, 2_5_8, 0, 2_5_8, 2_8_8, 2_5_8, 3_1_9, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 0, 2_5_8, 2_8_7, 2_5_8, 3_1_5, 2_5_8, 2_8_9, 2_5_8, 2_7_8, 9_9, 2_6_9, 2_6_6, 2_6_2, 8, 2_5_9, 2_4_1, 4, 2_1_7, 2_3_0, 2_6_8, 2_6_6, 5_5, 1_6_8, 1_0_6, 7_5, 1_9_3, 2_6_6, 2_2_3, 2_7, 4_9, 2_6, 2_8_2, 2_5, 2_6_4, 2_9_9, 1_9, 2_6, 0, 2_5_8, 2_7_7, 1_1_7, 8_6, 9_3, 1_7_6, 1_8_3, 2_7_0, 1_1, 2_6_2, 4_2, 6_1, 2_6_5, ] self.assertListEqual(a , self.big_tokenizer.encode(a ) ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: import torch from transformers import ReformerConfig, ReformerModel # Build sequence lowercase__ : str = list(self.big_tokenizer.get_vocab().keys() )[:1_0] lowercase__ : List[Any] = ' '.join(a ) lowercase__ : List[Any] = self.big_tokenizer.encode_plus(a , return_tensors='pt' ) lowercase__ : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' ) lowercase__ : Optional[int] = ReformerConfig() # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024) lowercase__ : Dict = encoded_sequence['input_ids'].shape lowercase__ : Dict = ReformerModel(a ) # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**a ) model(**a ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: # fmt: off lowercase__ : Optional[Any] = {'input_ids': [[1_0_8, 2_6_5, 2_4, 1_1_1, 4, 2_5_8, 1_5_6, 7, 5_1, 2_7_9, 5_8, 7, 7_6, 2_5, 6_9, 2_7_8], [1_4_0, 2_4_3, 2_6_4, 1_3_4, 1_7, 2_6_7, 7_7, 2_6_3, 2_2, 2_6_2, 2_9_7, 2_5_8, 3_0_4, 1_7_7, 2_7_9, 2_6_6, 1_4, 8_9, 1_3, 3_5, 2_6_1, 2_9_9, 2_7_2, 1_3_7, 2_7_5, 2_7_8]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # This tokenizer does not know some characters like ")". # That is the reason why we use very simple texts here. # Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064 lowercase__ : List[Any] = [ 'This is a very simple sentence.', 'The quick brown fox jumps over the lazy dog.', ] self.tokenizer_integration_test_util( expected_encoding=a , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=a , sequences=a , )
77
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _snake_case = logging.getLogger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple: # in NER datasets, the last column is usually reserved for NER label _a : Optional[int] = label_idx def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : Any = mode.value _a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : int = 1 _a : int = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: _a : str = [] _a : str = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 _a : List[str] = [] _a : str = [] else: _a : List[Any] = line.split(""" """ ) words.append(splits[0] ) if len(UpperCAmelCase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) return examples def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]: _a : List[str] = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(UpperCAmelCase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(UpperCAmelCase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : List[Any] = f.read().splitlines() if "O" not in labels: _a : Union[str, Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCamelCase ( snake_case_ ): def __init__( self : Union[str, Any] ) -> List[str]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : Optional[int] = f.read().splitlines() if "O" not in labels: _a : Optional[Any] = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : List[Any] = mode.value _a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : List[str] = 1 _a : Optional[Any] = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(UpperCAmelCase__ ): _a : List[Any] = [] _a : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 return examples def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict: _a : Optional[Any] = 0 for sentence in parse_incr(UpperCAmelCase__ ): _a : List[str] = preds_list[example_id] _a : str = """""" for token in sentence: out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(UpperCAmelCase__ ) example_id += 1 def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
294
0
"""simple docstring""" def _lowerCAmelCase ( lowercase_ ): UpperCAmelCase = [0] * len(lowercase_ ) for i in range(1 , len(lowercase_ ) ): # use last results for better performance - dynamic programming UpperCAmelCase = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: UpperCAmelCase = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 UpperCAmelCase = j return prefix_result def _lowerCAmelCase ( lowercase_ ): return max(prefix_function(lowercase_ ) ) if __name__ == "__main__": import doctest doctest.testmod()
78
"""simple docstring""" from __future__ import annotations import time import numpy as np _snake_case = [8, 5, 9, 7] _snake_case = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _snake_case = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class UpperCamelCase : def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None: _a : List[str] = claim_vector _a : List[Any] = allocated_resources_table _a : Union[str, Any] = maximum_claim_table def _lowercase ( self : Tuple ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _lowercase ( self : int ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _lowercase ( self : List[str] ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]: return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()} def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None: _a : List[Any] = self.__need() _a : Optional[int] = self.__allocated_resources_table _a : str = self.__available_resources() _a : Optional[Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: _a : int = False for each_need in need_list: _a : Optional[int] = True for index, need in enumerate(UpperCAmelCase__ ): if need > available_resources[index]: _a : List[Any] = False break if execution: _a : str = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: _a : Any = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(UpperCAmelCase__ ) # update available/freed resources stack _a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def _lowercase ( self : Any ) -> Optional[int]: print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
294
0
'''simple docstring''' def __lowercase ( __lowercase = 50 ) -> int: '''simple docstring''' _A = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"""{solution() = }""")
79
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar _snake_case = TypeVar('_T') class UpperCamelCase ( Generic[_T] ): def __init__( self : Optional[int] , UpperCAmelCase__ : Iterable[_T] | None = None ) -> None: _a : list[_T] = list(iterable or [] ) _a : list[_T] = [] def __len__( self : str ) -> int: return len(self._stacka ) + len(self._stacka ) def __repr__( self : List[str] ) -> str: return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : _T ) -> None: self._stacka.append(UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ) -> _T: _a : Any = self._stacka.pop _a : Union[str, Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
294
0
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowercase_ ( unittest.TestCase ): @slow def __a ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModel.from_pretrained(a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModel.from_pretrained(a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) @slow def __a ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModelForPreTraining.from_pretrained(a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModelForPreTraining.from_pretrained(a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) @slow def __a ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModelForCausalLM.from_pretrained(a , from_pt=a ) UpperCamelCase__ , UpperCamelCase__ = TFAutoModelForCausalLM.from_pretrained( a , output_loading_info=a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModelForCausalLM.from_pretrained(a , from_tf=a ) UpperCamelCase__ , UpperCamelCase__ = AutoModelForCausalLM.from_pretrained( a , output_loading_info=a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) @slow def __a ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModelWithLMHead.from_pretrained(a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) @slow def __a ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModelForMaskedLM.from_pretrained(a , from_pt=a ) UpperCamelCase__ , UpperCamelCase__ = TFAutoModelForMaskedLM.from_pretrained( a , output_loading_info=a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModelForMaskedLM.from_pretrained(a , from_tf=a ) UpperCamelCase__ , UpperCamelCase__ = AutoModelForMaskedLM.from_pretrained( a , output_loading_info=a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) @slow def __a ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained(a , from_pt=a ) UpperCamelCase__ , UpperCamelCase__ = TFAutoModelForSeqaSeqLM.from_pretrained( a , output_loading_info=a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained(a , from_tf=a ) UpperCamelCase__ , UpperCamelCase__ = AutoModelForSeqaSeqLM.from_pretrained( a , output_loading_info=a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) @slow def __a ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModelForSequenceClassification.from_pretrained(a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModelForSequenceClassification.from_pretrained(a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) @slow def __a ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: UpperCamelCase__ = AutoConfig.from_pretrained(a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = TFAutoModelForQuestionAnswering.from_pretrained(a , from_pt=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) UpperCamelCase__ = AutoModelForQuestionAnswering.from_pretrained(a , from_tf=a ) self.assertIsNotNone(a ) self.assertIsInstance(a , a ) def __a ( self ): UpperCamelCase__ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a ) self.assertIsInstance(a , a ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 ) UpperCamelCase__ = AutoModelWithLMHead.from_pretrained(a , from_tf=a ) self.assertIsInstance(a , a ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 ) def __a ( self ): UpperCamelCase__ = TFAutoModelWithLMHead.from_pretrained(a , from_pt=a ) self.assertIsInstance(a , a ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 ) UpperCamelCase__ = AutoModelWithLMHead.from_pretrained(a , from_tf=a ) self.assertIsInstance(a , a ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=a ) , 1_44_10 )
80
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _snake_case = False class UpperCamelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Optional[Any] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Tuple ) -> List[Any]: _a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Optional[Any] = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase__ ) _a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : Optional[Any] = generator.manual_seed(0 ) _a : str = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : int = """cyberpunk 2077""" _a : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Tuple = torch.manual_seed(0 ) _a : Any = pipe.dual_guided( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _a : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : int = """A painting of a squirrel eating a burger """ _a : Tuple = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.text_to_image( prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images _a : int = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images _a : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
294
0
"""simple docstring""" import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowerCamelCase_ : List[Any] = logging.getLogger(__name__) def _A ( ): """simple docstring""" a =argparse.ArgumentParser( description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' ) parser.add_argument( '''--dataset_name''' , type=lowercase , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , ) parser.add_argument( '''--dataset_config''' , type=lowercase , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' ) parser.add_argument( '''--tokenizer_name_or_path''' , type=lowercase , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , ) parser.add_argument( '''--shard_size''' , type=lowercase , default=10_00 , help='''Number of entries to go in a single shard.''' , ) parser.add_argument('''--split''' , type=lowercase , default='''train''' , choices=['''train''', '''test''', '''validation'''] ) parser.add_argument( '''--limit''' , default=lowercase , type=lowercase , help='''Limit the number of shards (used for debugging).''' , ) parser.add_argument( '''--max_length''' , type=lowercase , default=5_12 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum''' ''' sequence length that is a multiple of 8.''' , ) parser.add_argument( '''--output_dir''' , default='''tf-tpu''' , type=lowercase , help='''Output directory where the TFRecord shards will be saved. If the''' ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord''' ''' shards will be directly saved to a Google Cloud Storage bucket.''' , ) a =parser.parse_args() return args def _A ( lowercase ): """simple docstring""" def fn(lowercase ): return tokenizer(examples['''text'''] ) return fn def _A ( lowercase ): """simple docstring""" a =[] for i in range(len(tokenized_data['''input_ids'''] ) ): a ={ '''input_ids''': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['''input_ids'''][i] ) ), '''attention_mask''': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['''attention_mask'''][i] ) ), } a =tf.train.Features(feature=lowercase ) a =tf.train.Example(features=lowercase ) a =example.SerializeToString() records.append(lowercase ) return records def _A ( lowercase ): """simple docstring""" a =datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: a =min(len(lowercase ) , args.limit ) a =dataset.select(range(lowercase ) ) print(f'''Limiting the dataset to {args.limit} entries.''' ) a =AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a =os.path.join(args.output_dir , args.split ) if not os.path.exists(lowercase ): os.makedirs(lowercase ) else: a =os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. a =tokenize_function(lowercase ) a =dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=['''text'''] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowercase ): # Concatenate all texts. a ={k: sum(examples[k] , [] ) for k in examples.keys()} a =len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a =(total_length // args.max_length) * args.max_length # Split by chunks of max_len. a ={ k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )] for k, t in concatenated_examples.items() } return result a =dataset_tokenized.map(lowercase , batched=lowercase , batch_size=10_00 , num_proc=4 ) a =0 a =0 for shard in range(0 , len(lowercase ) , args.shard_size ): a =grouped_dataset[shard : shard + args.shard_size] a =len(dataset_snapshot['''input_ids'''] ) a =os.path.join(lowercase , f'''dataset-{shard_count}-{records_containing}.tfrecord''' ) a =get_serialized_examples(lowercase ) with tf.io.TFRecordWriter(lowercase ) as out_file: for i in range(len(lowercase ) ): a =serialized_examples[i] out_file.write(lowercase ) print('''Wrote file {} containing {} records'''.format(lowercase , lowercase ) ) shard_count += 1 total_records += records_containing with open(f'''split-{args.split}-records-count.txt''' , '''w''' ) as f: print(f'''Total {args.split} records: {total_records}''' , file=lowercase ) if __name__ == "__main__": lowerCamelCase_ : Dict = parse_args() main(args)
81
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'vocab.json'} _snake_case = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _snake_case = {'mgp-str': 27} class UpperCamelCase ( snake_case_ ): UpperCamelCase : List[str] = VOCAB_FILES_NAMES UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]="[GO]" , UpperCAmelCase__ : Tuple="[GO]" , UpperCAmelCase__ : Optional[int]="[s]" , UpperCAmelCase__ : int="[GO]" , **UpperCAmelCase__ : Dict ) -> int: super().__init__( unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) with open(UpperCAmelCase__ , encoding="""utf-8""" ) as vocab_handle: _a : int = json.load(UpperCAmelCase__ ) _a : Optional[int] = {v: k for k, v in self.vocab.items()} @property def _lowercase ( self : Dict ) -> Union[str, Any]: return len(self.vocab ) def _lowercase ( self : Union[str, Any] ) -> str: return dict(self.vocab , **self.added_tokens_encoder ) def _lowercase ( self : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]: _a : Tuple = [] for s in text: char_tokens.extend(UpperCAmelCase__ ) return char_tokens def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> Dict: return self.vocab.get(UpperCAmelCase__ , self.vocab.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: return self.decoder.get(UpperCAmelCase__ ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCAmelCase__ ) ) return _a : Tuple = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) + """\n""" ) return (vocab_file,)
294
0
A__ = """ # Installazione di Transformers ! pip install transformers datasets # Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e # rimuovi la modalità commento al comando seguente. # ! pip install git+https://github.com/huggingface/transformers.git """ A__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] A__ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
82
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = (IPNDMScheduler,) UpperCamelCase : int = (('''num_inference_steps''', 50),) def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int: _a : Optional[int] = {"""num_train_timesteps""": 1000} config.update(**UpperCAmelCase__ ) return config def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: _a : Optional[int] = dict(self.forward_default_kwargs ) _a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : Union[str, Any] = 0.1 * sample _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Any = dummy_past_residuals[:] if time_step is None: _a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ ) new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Optional[Any] = dummy_past_residuals[:] _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : Tuple ) -> List[str]: pass def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: _a : Optional[Any] = dict(self.forward_default_kwargs ) _a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : List[Any] = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Union[str, Any] = self.get_scheduler_config() _a : Optional[Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals (must be after setting timesteps) _a : Any = dummy_past_residuals[:] if time_step is None: _a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residual (must be after setting timesteps) _a : Optional[Any] = dummy_past_residuals[:] _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]: _a : Optional[int] = self.scheduler_classes[0] _a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) _a : int = 10 _a : List[Any] = self.dummy_model() _a : str = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _a : str = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample return sample def _lowercase ( self : int ) -> str: _a : Dict = dict(self.forward_default_kwargs ) _a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config() _a : Tuple = scheduler_class(**UpperCAmelCase__ ) _a : Tuple = self.dummy_sample _a : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ): scheduler.set_timesteps(UpperCAmelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ): _a : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] _a : Optional[Any] = dummy_past_residuals[:] _a : Optional[Any] = scheduler.timesteps[5] _a : str = scheduler.timesteps[6] _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) _a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowercase ( self : List[str] ) -> List[str]: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> List[str]: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : int ) -> List[Any]: _a : str = self.full_loop() _a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_mean.item() - 2540529 ) < 10
294
0
'''simple docstring''' from math import pi def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ): return 2 * pi * radius * (angle / 3_6_0) if __name__ == "__main__": print(arc_length(90, 10))
83
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'spiece.model'} _snake_case = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class UpperCamelCase ( snake_case_ ): def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None: _a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token _a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) _a : Optional[Any] = 3 _a : Tuple = do_lower_case _a : Tuple = remove_space _a : Tuple = keep_accents _a : Tuple = vocab_file _a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """ """See https://pypi.org/project/jieba/ for installation.""" ) _a : int = jieba _a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _lowercase ( self : Optional[Any] ) -> Any: return len(self.sp_model ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> List[str]: _a : Tuple = self.__dict__.copy() _a : Tuple = None return state def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict: _a : Tuple = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple = {} _a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict: if self.remove_space: _a : Optional[int] = """ """.join(inputs.strip().split() ) else: _a : List[Any] = inputs _a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ ) _a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] ) if self.do_lower_case: _a : Union[str, Any] = outputs.lower() return outputs def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: _a : str = self.preprocess_text(UpperCAmelCase__ ) _a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) _a : Union[str, Any] = [] for piece in pieces: if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _a : Dict = cur_pieces[1:] else: _a : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCAmelCase__ ) else: new_pieces.append(UpperCAmelCase__ ) return new_pieces def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int: return self.sp_model.PieceToId(UpperCAmelCase__ ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any: return self.sp_model.IdToPiece(UpperCAmelCase__ ) def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict: _a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip() return out_string def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Optional[Any] = [self.sep_token_id] _a : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] return ([0] * len(UpperCAmelCase__ )) + [1, 1] def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Any = [self.sep_token_id] _a : Optional[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : Union[str, Any] = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , """wb""" ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,) def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]: _a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) _a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
294
0
"""simple docstring""" import os def _snake_case ( lowercase__ : str = "matrix.txt" ) -> int: '''simple docstring''' with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as in_file: lowerCAmelCase_ :str = in_file.read() lowerCAmelCase_ :Tuple = [[int(lowercase__ ) for cell in row.split(""",""" )] for row in data.strip().splitlines()] lowerCAmelCase_ :Tuple = [[0 for cell in row] for row in grid] lowerCAmelCase_ :str = len(grid[0] ) lowerCAmelCase_ :Union[str, Any] = [[0 for i in range(lowercase__ )] for j in range(lowercase__ )] lowerCAmelCase_ :Optional[Any] = grid[0][0] for i in range(1 , lowercase__ ): lowerCAmelCase_ :Optional[int] = grid[0][i] + dp[0][i - 1] for i in range(1 , lowercase__ ): lowerCAmelCase_ :str = grid[i][0] + dp[i - 1][0] for i in range(1 , lowercase__ ): for j in range(1 , lowercase__ ): lowerCAmelCase_ :Dict = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F"""{solution() = }""")
84
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase ( nn.Module ): def __init__( self : Union[str, Any] ) -> int: super().__init__() _a : Optional[Any] = nn.Linear(3 , 4 ) _a : Tuple = nn.BatchNormad(4 ) _a : Dict = nn.Linear(4 , 5 ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int: return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) ) class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]: return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]: return output + 1 class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Dict ) -> str: _a : List[Any] = ModelForTest() _a : str = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(test_model._hf_hook , UpperCAmelCase__ ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Dict = ModelForTest() _a : Dict = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ ) self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Dict ) -> int: _a : str = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Optional[Any] = test_model(x + 1 ) _a : str = test_model(x + 2 ) _a : Union[str, Any] = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : int = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : str = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : int = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) def _lowercase ( self : Tuple ) -> int: _a : Tuple = ModelForTest() _a : Union[str, Any] = torch.randn(2 , 3 ) _a : Optional[int] = test_model(UpperCAmelCase__ ) _a : int = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : List[Any] = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Dict = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : Any = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Optional[int] = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 ) def _lowercase ( self : Dict ) -> Optional[Any]: _a : Any = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Dict = test_model(UpperCAmelCase__ ) _a : Any = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _a : Any = True _a : Union[str, Any] = test_model(UpperCAmelCase__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _lowercase ( self : Optional[Any] ) -> str: _a : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _a : Optional[int] = torch.randn(2 , 3 ) _a : Any = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) ) _a : str = torch.randn(2 , 3 ).to(0 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(0 ) ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : int = torch.randn(2 , 3 ) _a : str = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload _a : List[str] = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Tuple = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Tuple ) -> List[str]: _a : str = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : List[Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : List[str] = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Dict ) -> str: _a : Optional[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : str = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Union[str, Any] = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Any = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
294
0
'''simple docstring''' def UpperCamelCase_( snake_case : int ): '''simple docstring''' if isinstance(snake_case , snake_case ): raise TypeError("'float' object cannot be interpreted as an integer" ) if isinstance(snake_case , snake_case ): raise TypeError("'str' object cannot be interpreted as an integer" ) if num == 0: return "0b0" snake_case_ = False if num < 0: snake_case_ = True snake_case_ = -num snake_case_ = [] while num > 0: binary.insert(0 , num % 2 ) num >>= 1 if negative: return "-0b" + "".join(str(snake_case ) for e in binary ) return "0b" + "".join(str(snake_case ) for e in binary ) if __name__ == "__main__": import doctest doctest.testmod()
85
"""simple docstring""" from __future__ import annotations def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' print(F"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(UpperCamelCase__ ): print(F"""{i}\t\t{d}""" ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for j in range(UpperCamelCase__ ): _a , _a , _a : List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Dict = [float("""inf""" )] * vertex_count _a : Any = 0.0 for _ in range(vertex_count - 1 ): for j in range(UpperCamelCase__ ): _a , _a , _a : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _a : Any = distance[u] + w _a : Union[str, Any] = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() _snake_case = int(input('Enter number of vertices: ').strip()) _snake_case = int(input('Enter number of edges: ').strip()) _snake_case = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) _snake_case , _snake_case , _snake_case = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) _snake_case = {'src': src, 'dst': dest, 'weight': weight} _snake_case = int(input('\nEnter shortest path source:').strip()) _snake_case = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
294
0
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ): try: __lowerCAmelCase : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __lowerCAmelCase : str = default else: # KEY is set, convert it to True or False. try: __lowerCAmelCase : Optional[int] = strtobool(_UpperCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value lowerCamelCase__ = parse_flag_from_env("""RUN_SLOW""", default=False) lowerCamelCase__ = parse_flag_from_env("""RUN_REMOTE""", default=False) lowerCamelCase__ = parse_flag_from_env("""RUN_LOCAL""", default=True) lowerCamelCase__ = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression lowerCamelCase__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") lowerCamelCase__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") lowerCamelCase__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio lowerCamelCase__ = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """, ) # Beam lowerCamelCase__ = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility lowerCamelCase__ = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows lowerCamelCase__ = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def __lowerCAmelCase (_UpperCamelCase ): try: import faiss # noqa except ImportError: __lowerCAmelCase : Dict = unittest.skip('test requires faiss' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import regex # noqa except ImportError: __lowerCAmelCase : Dict = unittest.skip('test requires regex' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import elasticsearch # noqa except ImportError: __lowerCAmelCase : Optional[int] = unittest.skip('test requires elasticsearch' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import sqlalchemy # noqa except ImportError: __lowerCAmelCase : Union[str, Any] = unittest.skip('test requires sqlalchemy' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.TORCH_AVAILABLE: __lowerCAmelCase : List[str] = unittest.skip('test requires PyTorch' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.TF_AVAILABLE: __lowerCAmelCase : List[Any] = unittest.skip('test requires TensorFlow' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.JAX_AVAILABLE: __lowerCAmelCase : Tuple = unittest.skip('test requires JAX' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.PIL_AVAILABLE: __lowerCAmelCase : Tuple = unittest.skip('test requires Pillow' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): def _require_spacy_model(_UpperCamelCase ): try: import spacy # noqa F401 spacy.load(_UpperCamelCase ) except ImportError: return unittest.skip('test requires spacy' )(_UpperCamelCase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(_UpperCamelCase ) )(_UpperCamelCase ) else: return test_case return _require_spacy_model def __lowerCAmelCase (_UpperCamelCase ): try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_slow_tests or _run_slow_tests == 0: __lowerCAmelCase : Union[str, Any] = unittest.skip('test is slow' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_local_tests or _run_local_tests == 0: __lowerCAmelCase : str = unittest.skip('test is local' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_packaged_tests or _run_packaged_tests == 0: __lowerCAmelCase : Union[str, Any] = unittest.skip('test is packaged' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_remote_tests or _run_remote_tests == 0: __lowerCAmelCase : int = unittest.skip('test requires remote' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (*_UpperCamelCase ): def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(_UpperCamelCase ) and name.startswith('test' ): for decorator in decorators: __lowerCAmelCase : Any = decorator(_UpperCamelCase ) setattr(cls , _UpperCamelCase , _UpperCamelCase ) return cls return decorate class A__ ( _lowerCamelCase): pass class A__ ( _lowerCamelCase): A_ : Tuple = 0 A_ : List[Any] = 1 A_ : Optional[int] = 2 @contextmanager def __lowerCAmelCase (_UpperCamelCase=OfflineSimulationMode.CONNECTION_FAILS , _UpperCamelCase=1e-1_6 ): __lowerCAmelCase : List[str] = requests.Session().request def timeout_request(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): # Change the url to an invalid url so that the connection hangs __lowerCAmelCase : Optional[int] = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." ) __lowerCAmelCase : List[Any] = timeout try: return online_request(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __lowerCAmelCase : Tuple = url __lowerCAmelCase : Optional[int] = e.args[0] __lowerCAmelCase : Any = (max_retry_error.args[0].replace('10.255.255.1' , F"OfflineMock[{url}]" ),) __lowerCAmelCase : List[str] = (max_retry_error,) raise def raise_connection_error(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): raise requests.ConnectionError('Offline mode is enabled.' , request=_UpperCamelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , _UpperCamelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , _UpperCamelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCamelCase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def __lowerCAmelCase (*_UpperCamelCase , **_UpperCamelCase ): __lowerCAmelCase : Tuple = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_UpperCamelCase , **_UpperCamelCase ) as tmp_dir: try: os.chdir(_UpperCamelCase ) yield finally: os.chdir(_UpperCamelCase ) @contextmanager def __lowerCAmelCase (): import gc gc.collect() __lowerCAmelCase : Optional[Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __lowerCAmelCase (): import gc gc.collect() __lowerCAmelCase : List[Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): return deepcopy(_UpperCamelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(_UpperCamelCase ).integers(0 , 100 , 10 ).tolist() def __lowerCAmelCase (_UpperCamelCase ): import decorator from requests.exceptions import HTTPError def _wrapper(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ): try: return func(*_UpperCamelCase , **_UpperCamelCase ) except HTTPError as err: if str(_UpperCamelCase ).startswith('500' ) or str(_UpperCamelCase ).startswith('502' ): pytest.xfail(str(_UpperCamelCase ) ) raise err return decorator.decorator(_wrapper , _UpperCamelCase ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = returncode __lowerCAmelCase : Optional[Any] = stdout __lowerCAmelCase : Optional[int] = stderr async def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): while True: __lowerCAmelCase : Optional[Any] = await stream.readline() if line: callback(_UpperCamelCase ) else: break async def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False ): if echo: print('\nRunning: ' , ' '.join(_UpperCamelCase ) ) __lowerCAmelCase : Any = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. # out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __lowerCAmelCase : int = [] __lowerCAmelCase : Tuple = [] def tee(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ): __lowerCAmelCase : Tuple = line.decode('utf-8' ).rstrip() sink.append(_UpperCamelCase ) if not quiet: print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label='stderr:' ) ), ] , timeout=_UpperCamelCase , ) return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=180 , _UpperCamelCase=False , _UpperCamelCase=True ): __lowerCAmelCase : List[Any] = asyncio.get_event_loop() __lowerCAmelCase : Union[str, Any] = loop.run_until_complete( _stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) ) __lowerCAmelCase : List[Any] = ' '.join(_UpperCamelCase ) if result.returncode > 0: __lowerCAmelCase : Union[str, Any] = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F"'{cmd_str}' produced no output." ) return result def __lowerCAmelCase (): __lowerCAmelCase : str = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __lowerCAmelCase : Any = re.sub(r'^gw' , '' , _UpperCamelCase , 0 , re.M ) return int(_UpperCamelCase ) def __lowerCAmelCase (): __lowerCAmelCase : int = 2_9500 __lowerCAmelCase : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
86
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
import argparse import os import re UpperCamelCase = '''src/transformers''' # Pattern that looks at the indentation in a line. UpperCamelCase = re.compile(R'''^(\s*)\S''') # Pattern that matches `"key":" and puts `key` in group 0. UpperCamelCase = re.compile(R'''^\s*"([^"]+)":''') # Pattern that matches `_import_structure["key"]` and puts `key` in group 0. UpperCamelCase = re.compile(R'''^\s*_import_structure\["([^"]+)"\]''') # Pattern that matches `"key",` and puts `key` in group 0. UpperCamelCase = re.compile(R'''^\s*"([^"]+)",\s*$''') # Pattern that matches any `[stuff]` and puts `stuff` in group 0. UpperCamelCase = re.compile(R'''\[([^\]]+)\]''') def lowercase_ ( _lowerCamelCase : int): lowercase__ : str = _re_indent.search(_lowerCamelCase) return "" if search is None else search.groups()[0] def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple="" , _lowerCamelCase : Any=None , _lowerCamelCase : Tuple=None): lowercase__ : Optional[Any] = 0 lowercase__ : Optional[int] = code.split("\n") if start_prompt is not None: while not lines[index].startswith(_lowerCamelCase): index += 1 lowercase__ : str = ["\n".join(lines[:index])] else: lowercase__ : List[Any] = [] # We split into blocks until we get to the `end_prompt` (or the end of the block). lowercase__ : Any = [lines[index]] index += 1 while index < len(_lowerCamelCase) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase)): if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level: if len(_lowerCamelCase) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "): current_block.append(lines[index]) blocks.append("\n".join(_lowerCamelCase)) if index < len(_lowerCamelCase) - 1: lowercase__ : List[str] = [lines[index + 1]] index += 1 else: lowercase__ : List[str] = [] else: blocks.append("\n".join(_lowerCamelCase)) lowercase__ : List[str] = [lines[index]] else: current_block.append(lines[index]) index += 1 # Adds current block if it's nonempty. if len(_lowerCamelCase) > 0: blocks.append("\n".join(_lowerCamelCase)) # Add final block after end_prompt if provided. if end_prompt is not None and index < len(_lowerCamelCase): blocks.append("\n".join(lines[index:])) return blocks def lowercase_ ( _lowerCamelCase : Optional[int]): def _inner(_lowerCamelCase : str): return key(_lowerCamelCase).lower().replace("_" , "") return _inner def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any=None): # If no key is provided, we use a noop. def noop(_lowerCamelCase : str): return x if key is None: lowercase__ : Any = noop # Constants are all uppercase, they go first. lowercase__ : Tuple = [obj for obj in objects if key(_lowerCamelCase).isupper()] # Classes are not all uppercase but start with a capital, they go second. lowercase__ : str = [obj for obj in objects if key(_lowerCamelCase)[0].isupper() and not key(_lowerCamelCase).isupper()] # Functions begin with a lowercase, they go last. lowercase__ : Any = [obj for obj in objects if not key(_lowerCamelCase)[0].isupper()] lowercase__ : Dict = ignore_underscore(_lowerCamelCase) return sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase) + sorted(_lowerCamelCase , key=_lowerCamelCase) def lowercase_ ( _lowerCamelCase : str): # This inner function sort imports between [ ]. def _replace(_lowerCamelCase : List[Any]): lowercase__ : Optional[Any] = match.groups()[0] if "," not in imports: return f'''[{imports}]''' lowercase__ : Optional[int] = [part.strip().replace("\"" , "") for part in imports.split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: lowercase__ : Optional[int] = keys[:-1] return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)]) + "]" lowercase__ : List[Any] = import_statement.split("\n") if len(_lowerCamelCase) > 3: # Here we have to sort internal imports that are on several lines (one per name): # key: [ # "object1", # "object2", # ... # ] # We may have to ignore one or two lines on each side. lowercase__ : Dict = 2 if lines[1].strip() == "[" else 1 lowercase__ : Optional[Any] = [(i, _re_strip_line.search(_lowerCamelCase).groups()[0]) for i, line in enumerate(lines[idx:-idx])] lowercase__ : Any = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase: x[1]) lowercase__ : List[str] = [lines[x[0] + idx] for x in sorted_indices] return "\n".join(lines[:idx] + sorted_lines + lines[-idx:]) elif len(_lowerCamelCase) == 3: # Here we have to sort internal imports that are on one separate line: # key: [ # "object1", "object2", ... # ] if _re_bracket_content.search(lines[1]) is not None: lowercase__ : Any = _re_bracket_content.sub(_replace , lines[1]) else: lowercase__ : List[Any] = [part.strip().replace("\"" , "") for part in lines[1].split(",")] # We will have a final empty element if the line finished with a comma. if len(keys[-1]) == 0: lowercase__ : Optional[Any] = keys[:-1] lowercase__ : Optional[Any] = get_indent(lines[1]) + ", ".join([f'''"{k}"''' for k in sort_objects(_lowerCamelCase)]) return "\n".join(_lowerCamelCase) else: # Finally we have to deal with imports fitting on one line lowercase__ : Any = _re_bracket_content.sub(_replace , _lowerCamelCase) return import_statement def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=True): with open(_lowerCamelCase , encoding="utf-8") as f: lowercase__ : Any = f.read() if "_import_structure" not in code: return # Blocks of indent level 0 lowercase__ : List[Any] = split_code_in_indented_blocks( _lowerCamelCase , start_prompt="_import_structure = {" , end_prompt="if TYPE_CHECKING:") # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt). for block_idx in range(1 , len(_lowerCamelCase) - 1): # Check if the block contains some `_import_structure`s thingy to sort. lowercase__ : Optional[int] = main_blocks[block_idx] lowercase__ : Any = block.split("\n") # Get to the start of the imports. lowercase__ : int = 0 while line_idx < len(_lowerCamelCase) and "_import_structure" not in block_lines[line_idx]: # Skip dummy import blocks if "import dummy" in block_lines[line_idx]: lowercase__ : List[str] = len(_lowerCamelCase) else: line_idx += 1 if line_idx >= len(_lowerCamelCase): continue # Ignore beginning and last line: they don't contain anything. lowercase__ : str = "\n".join(block_lines[line_idx:-1]) lowercase__ : Optional[Any] = get_indent(block_lines[1]) # Slit the internal block into blocks of indent level 1. lowercase__ : List[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase) # We have two categories of import key: list or _import_structure[key].append/extend lowercase__ : Optional[int] = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key # Grab the keys, but there is a trap: some lines are empty or just comments. lowercase__ : Dict = [(pattern.search(_lowerCamelCase).groups()[0] if pattern.search(_lowerCamelCase) is not None else None) for b in internal_blocks] # We only sort the lines with a key. lowercase__ : Optional[int] = [(i, key) for i, key in enumerate(_lowerCamelCase) if key is not None] lowercase__ : List[Any] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase: x[1])] # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest. lowercase__ : Tuple = 0 lowercase__ : Tuple = [] for i in range(len(_lowerCamelCase)): if keys[i] is None: reorderded_blocks.append(internal_blocks[i]) else: lowercase__ : int = sort_objects_in_import(internal_blocks[sorted_indices[count]]) reorderded_blocks.append(_lowerCamelCase) count += 1 # And we put our main block back together with its first and last line. lowercase__ : Any = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]]) if code != "\n".join(_lowerCamelCase): if check_only: return True else: print(f'''Overwriting {file}.''') with open(_lowerCamelCase , "w" , encoding="utf-8") as f: f.write("\n".join(_lowerCamelCase)) def lowercase_ ( _lowerCamelCase : List[Any]=True): lowercase__ : Optional[int] = [] for root, _, files in os.walk(_lowerCamelCase): if "__init__.py" in files: lowercase__ : Optional[int] = sort_imports(os.path.join(_lowerCamelCase , "__init__.py") , check_only=_lowerCamelCase) if result: lowercase__ : List[str] = [os.path.join(_lowerCamelCase , "__init__.py")] if len(_lowerCamelCase) > 0: raise ValueError(f'''Would overwrite {len(_lowerCamelCase)} files, run `make style`.''') if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--check_only''', action='''store_true''', help='''Whether to only check or fix style.''') UpperCamelCase = parser.parse_args() sort_imports_in_all_inits(check_only=args.check_only)
87
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): '''simple docstring''' _a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: _a : Tuple = 1 - (matter_density + radiation_density + dark_energy) _a : int = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _a : List[str] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _snake_case = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
294
0
import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger __lowerCAmelCase : Any = get_logger(__name__) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Optional[str] = None ) -> Optional[Any]: """simple docstring""" __magic_name__ = ( os.path.join(UpperCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) __magic_name__ = Extractor def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str ) -> str: """simple docstring""" from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" __magic_name__ = os.path.abspath(UpperCamelCase__ ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCamelCase__ ) ) def _lowercase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : bool ) -> bool: """simple docstring""" return force_extract or ( not os.path.isfile(UpperCamelCase__ ) and not (os.path.isdir(UpperCamelCase__ ) and os.listdir(UpperCamelCase__ )) ) def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ) -> str: """simple docstring""" __magic_name__ = self.extractor.infer_extractor_format(UpperCamelCase__ ) if not extractor_format: return input_path __magic_name__ = self._get_output_path(UpperCamelCase__ ) if self._do_extract(UpperCamelCase__ , UpperCamelCase__ ): self.extractor.extract(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return output_path class UpperCAmelCase_ ( _A ): '''simple docstring''' @classmethod @abstractmethod def _lowercase ( cls : List[str] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : Union[str, Any] ) -> bool: """simple docstring""" ... @staticmethod @abstractmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" ... class UpperCAmelCase_ ( _A , _A ): '''simple docstring''' a__ = [] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> List[str]: """simple docstring""" with open(UpperCamelCase__ , """rb""" ) as f: return f.read(UpperCamelCase__ ) @classmethod def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool: """simple docstring""" if not magic_number: __magic_name__ = max(len(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) try: __magic_name__ = cls.read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) except OSError: return False return any(magic_number.startswith(UpperCamelCase__ ) for cls_magic_number in cls.magic_numbers ) class UpperCAmelCase_ ( _A ): '''simple docstring''' @classmethod def _lowercase ( cls : Optional[Any] , UpperCamelCase__ : Union[Path, str] , **UpperCamelCase__ : int ) -> bool: """simple docstring""" return tarfile.is_tarfile(UpperCamelCase__ ) @staticmethod def _lowercase ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] ) -> Union[str, Any]: """simple docstring""" def resolved(UpperCamelCase__ : str ) -> str: return os.path.realpath(os.path.abspath(UpperCamelCase__ ) ) def badpath(UpperCamelCase__ : str , UpperCamelCase__ : str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ).startswith(UpperCamelCase__ ) def badlink(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> bool: # Links are interpreted relative to the directory containing the link __magic_name__ = resolved(os.path.join(UpperCamelCase__ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCamelCase__ ) __magic_name__ = resolved(UpperCamelCase__ ) for finfo in members: if badpath(finfo.name , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked (illegal path)''' ) elif finfo.issym() and badlink(UpperCamelCase__ , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}''' ) elif finfo.islnk() and badlink(UpperCamelCase__ , UpperCamelCase__ ): logger.error(F'''Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}''' ) else: yield finfo @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) __magic_name__ = tarfile.open(UpperCamelCase__ ) tar_file.extractall(UpperCamelCase__ , members=TarExtractor.safemembers(UpperCamelCase__ , UpperCamelCase__ ) ) tar_file.close() class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x1F\x8B"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" with gzip.open(UpperCamelCase__ , """rb""" ) as gzip_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [ B"""PK\x03\x04""", B"""PK\x05\x06""", # empty archive B"""PK\x07\x08""", # spanned archive ] @classmethod def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bytes = b"" ) -> bool: """simple docstring""" if super().is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. # From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCamelCase__ , """rb""" ) as fp: __magic_name__ = _EndRecData(UpperCamelCase__ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: __magic_name__ = fp.read(UpperCamelCase__ ) # CD is where we expect it to be if len(UpperCamelCase__ ) == sizeCentralDir: __magic_name__ = struct.unpack(UpperCamelCase__ , UpperCamelCase__ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with zipfile.ZipFile(UpperCamelCase__ , """r""" ) as zip_file: zip_file.extractall(UpperCamelCase__ ) zip_file.close() class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\xFD\x37\x7A\x58\x5A\x00"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" with lzma.open(UpperCamelCase__ ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""Rar!\x1a\x07\x00""", B"""Rar!\x1a\x07\x01\x00"""] # RAR_ID # RAR5_ID @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) __magic_name__ = rarfile.RarFile(UpperCamelCase__ ) rf.extractall(UpperCamelCase__ ) rf.close() class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x28\xb5\x2F\xFD"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd __magic_name__ = zstd.ZstdDecompressor() with open(UpperCamelCase__ , """rb""" ) as ifh, open(UpperCamelCase__ , """wb""" ) as ofh: dctx.copy_stream(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x42\x5A\x68"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" with bza.open(UpperCamelCase__ , """rb""" ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x37\x7A\xBC\xAF\x27\x1C"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with pyazr.SevenZipFile(UpperCamelCase__ , """r""" ) as archive: archive.extractall(UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [B"""\x04\x22\x4D\x18"""] @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] ) -> None: """simple docstring""" if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(UpperCamelCase__ , """rb""" ) as compressed_file: with open(UpperCamelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCamelCase__ , UpperCamelCase__ ) class UpperCAmelCase_ : '''simple docstring''' a__ = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def _lowercase ( cls : Tuple ) -> Tuple: """simple docstring""" return max( len(UpperCamelCase__ ) for extractor in cls.extractors.values() if issubclass(UpperCamelCase__ , UpperCamelCase__ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def _lowercase ( UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : int ) -> Union[str, Any]: """simple docstring""" try: return MagicNumberBaseExtractor.read_magic_number(UpperCamelCase__ , magic_number_length=UpperCamelCase__ ) except OSError: return b"" @classmethod def _lowercase ( cls : List[Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : bool = False ) -> bool: """simple docstring""" warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=UpperCamelCase__ , ) __magic_name__ = cls.infer_extractor_format(UpperCamelCase__ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def _lowercase ( cls : Dict , UpperCamelCase__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/> """simple docstring""" __magic_name__ = cls._get_magic_number_max_length() __magic_name__ = cls._read_magic_number(UpperCamelCase__ , UpperCamelCase__ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCamelCase__ , magic_number=UpperCamelCase__ ): return extractor_format @classmethod def _lowercase ( cls : Union[str, Any] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Union[Path, str] , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[BaseExtractor] = "deprecated" , ) -> None: """simple docstring""" os.makedirs(os.path.dirname(UpperCamelCase__ ) , exist_ok=UpperCamelCase__ ) # Prevent parallel extractions __magic_name__ = str(Path(UpperCamelCase__ ).with_suffix(""".lock""" ) ) with FileLock(UpperCamelCase__ ): shutil.rmtree(UpperCamelCase__ , ignore_errors=UpperCamelCase__ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCamelCase__ , UpperCamelCase__ ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'extractor_format' instead.""" , category=UpperCamelCase__ , ) __magic_name__ = extractor if extractor != """deprecated""" else extractor_format else: __magic_name__ = cls.extractors[extractor_format] return extractor.extract(UpperCamelCase__ , UpperCamelCase__ ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=UpperCamelCase__ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCamelCase__ ): return extractor.extract(UpperCamelCase__ , UpperCamelCase__ )
88
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : List[Any] ) -> Dict: _a : Optional[int] = tempfile.mkdtemp() _a : Optional[Any] = SamImageProcessor() _a : int = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : str ) -> int: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Tuple ) -> Dict: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Dict ) -> Dict: _a : List[Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) -> Tuple: _a : Optional[Any] = self.get_image_processor() _a : int = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Union[str, Any] = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def _lowercase ( self : Optional[Any] ) -> Optional[Any]: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = [torch.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : Optional[int] = [[683, 1024]] _a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : int = processor.post_process_masks( UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : Optional[Any] = [np.ones((1, 3, 5, 5) )] _a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : List[str] = [[1, 0], [0, 1]] with self.assertRaises(UpperCAmelCase__ ): _a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) @require_vision @require_tf class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Any ) -> List[str]: _a : List[str] = tempfile.mkdtemp() _a : Any = SamImageProcessor() _a : Union[str, Any] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Dict ) -> List[str]: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: _a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> str: _a : Union[str, Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : int = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def _lowercase ( self : Optional[Any] ) -> int: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Any = [tf.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : str = [[683, 1024]] _a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Union[str, Any] = processor.post_process_masks( UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : List[Any] = [np.ones((1, 3, 5, 5) )] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Dict = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): _a : List[Any] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : str ) -> Optional[Any]: _a : Optional[Any] = tempfile.mkdtemp() _a : Dict = SamImageProcessor() _a : List[str] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Tuple ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : str ) -> int: _a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _lowercase ( self : int ) -> List[Any]: _a : Optional[Any] = self.get_image_processor() _a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) _a : str = [tf.convert_to_tensor(UpperCAmelCase__ )] _a : Optional[int] = [torch.tensor(UpperCAmelCase__ )] _a : Union[str, Any] = [[1764, 2646]] _a : List[str] = [[683, 1024]] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) _a : List[str] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _lowercase ( self : str ) -> Optional[Any]: _a : List[Any] = self.get_image_processor() _a : Any = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Dict = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() _a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
294
0
'''simple docstring''' import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer from ...utils import logging __lowerCAmelCase = logging.get_logger(__name__) __lowerCAmelCase = '''▁''' __lowerCAmelCase = { '''vocab_file''': '''vocab.json''', '''spm_file''': '''sentencepiece.bpe.model''', '''tokenizer_config_file''': '''tokenizer_config.json''', } __lowerCAmelCase = { '''vocab_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''', }, '''spm_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''', }, '''tokenizer_config_file''': { '''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''', '''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''', }, } __lowerCAmelCase = { '''facebook/m2m100_418M''': 1_024, } # fmt: off __lowerCAmelCase = { '''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''], '''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de'''] } class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask'] lowerCAmelCase : List[int] = [] lowerCAmelCase : List[int] = [] def __init__( self : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Any ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : Optional[Any]=None ,_UpperCAmelCase : Union[str, Any]="<s>" ,_UpperCAmelCase : Union[str, Any]="</s>" ,_UpperCAmelCase : int="</s>" ,_UpperCAmelCase : Tuple="<pad>" ,_UpperCAmelCase : str="<unk>" ,_UpperCAmelCase : Tuple="m2m100" ,_UpperCAmelCase : Optional[Dict[str, Any]] = None ,_UpperCAmelCase : List[Any]=8 ,**_UpperCAmelCase : Union[str, Any] ,): _a : int = {} if sp_model_kwargs is None else sp_model_kwargs _a : int = language_codes _a : List[Any] = FAIRSEQ_LANGUAGE_CODES[language_codes] _a : Union[str, Any] = {lang_code: F"""__{lang_code}__""" for lang_code in fairseq_language_code} _a : List[str] = kwargs.get('additional_special_tokens' ,[] ) kwargs["additional_special_tokens"] += [ self.get_lang_token(_UpperCAmelCase ) for lang_code in fairseq_language_code if self.get_lang_token(_UpperCAmelCase ) not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=_UpperCAmelCase ,tgt_lang=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,sep_token=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,pad_token=_UpperCAmelCase ,language_codes=_UpperCAmelCase ,sp_model_kwargs=self.sp_model_kwargs ,num_madeup_words=_UpperCAmelCase ,**_UpperCAmelCase ,) _a : Any = vocab_file _a : int = load_json(_UpperCAmelCase ) _a : Dict = {v: k for k, v in self.encoder.items()} _a : int = spm_file _a : int = load_spm(_UpperCAmelCase ,self.sp_model_kwargs ) _a : Any = len(self.encoder ) _a : str = { self.get_lang_token(_UpperCAmelCase ): self.encoder_size + i for i, lang_code in enumerate(_UpperCAmelCase ) } _a : Optional[int] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(_UpperCAmelCase )} _a : Dict = {v: k for k, v in self.lang_token_to_id.items()} _a : Union[str, Any] = src_lang if src_lang is not None else 'en' _a : Optional[int] = tgt_lang _a : Optional[Any] = self.get_lang_id(self._src_lang ) self.set_src_lang_special_tokens(self._src_lang ) _a : List[Any] = num_madeup_words @property def __lowercase ( self : int ): return len(self.encoder ) + len(self.lang_token_to_id ) @property def __lowercase ( self : Optional[int] ): return self._src_lang @src_lang.setter def __lowercase ( self : str ,_UpperCAmelCase : str ): _a : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : str ): return self.sp_model.encode(_UpperCAmelCase ,out_type=_UpperCAmelCase ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ): if token in self.lang_token_to_id: return self.lang_token_to_id[token] return self.encoder.get(_UpperCAmelCase ,self.encoder[self.unk_token] ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : int ): if index in self.id_to_lang_token: return self.id_to_lang_token[index] return self.decoder.get(_UpperCAmelCase ,self.unk_token ) def __lowercase ( self : int ,_UpperCAmelCase : Dict ): _a : Dict = [] _a : List[str] = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_UpperCAmelCase ) + token _a : Optional[Any] = [] else: current_sub_tokens.append(_UpperCAmelCase ) out_string += self.sp_model.decode(_UpperCAmelCase ) return out_string.strip() def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ,_UpperCAmelCase : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase ,token_ids_a=_UpperCAmelCase ,already_has_special_tokens=_UpperCAmelCase ) _a : List[str] = [1] * len(self.prefix_tokens ) _a : Any = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones def __lowercase ( self : Optional[int] ,_UpperCAmelCase : List[int] ,_UpperCAmelCase : Optional[List[int]] = None ): if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def __lowercase ( self : List[Any] ): _a : Dict = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): _a : Dict = self.__dict__.copy() _a : Any = None return state def __setstate__( self : Union[str, Any] ,_UpperCAmelCase : Dict ): _a : List[Any] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): _a : Union[str, Any] = {} _a : Any = load_spm(self.spm_file ,self.sp_model_kwargs ) def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ): _a : List[str] = Path(_UpperCAmelCase ) if not save_dir.is_dir(): raise OSError(F"""{save_directory} should be a directory""" ) _a : int = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) _a : Tuple = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder ,_UpperCAmelCase ) if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file ,_UpperCAmelCase ) elif not os.path.isfile(self.spm_file ): with open(_UpperCAmelCase ,'wb' ) as fi: _a : Tuple = self.sp_model.serialized_model_proto() fi.write(_UpperCAmelCase ) return (str(_UpperCAmelCase ), str(_UpperCAmelCase )) def __lowercase ( self : List[str] ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : str = "en" ,_UpperCAmelCase : Optional[List[str]] = None ,_UpperCAmelCase : str = "ro" ,**_UpperCAmelCase : List[Any] ,): _a : int = src_lang _a : List[str] = tgt_lang self.set_src_lang_special_tokens(self.src_lang ) return super().prepare_seqaseq_batch(_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase ) def __lowercase ( self : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] ,**_UpperCAmelCase : List[Any] ): if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) _a : List[str] = src_lang _a : Optional[int] = self(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ,**_UpperCAmelCase ) _a : List[str] = self.get_lang_id(_UpperCAmelCase ) _a : List[str] = tgt_lang_id return inputs def __lowercase ( self : List[str] ): self.set_src_lang_special_tokens(self.src_lang ) def __lowercase ( self : Any ): self.set_tgt_lang_special_tokens(self.tgt_lang ) def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : str ): _a : Tuple = self.get_lang_token(_UpperCAmelCase ) _a : Optional[Any] = self.lang_token_to_id[lang_token] _a : Union[str, Any] = [self.cur_lang_id] _a : Union[str, Any] = [self.eos_token_id] def __lowercase ( self : List[str] ,_UpperCAmelCase : str ): _a : int = self.get_lang_token(_UpperCAmelCase ) _a : Optional[Any] = self.lang_token_to_id[lang_token] _a : Dict = [self.cur_lang_id] _a : Optional[int] = [self.eos_token_id] def __lowercase ( self : str ,_UpperCAmelCase : str ): return self.lang_code_to_token[lang] def __lowercase ( self : List[Any] ,_UpperCAmelCase : str ): _a : Optional[int] = self.get_lang_token(_UpperCAmelCase ) return self.lang_token_to_id[lang_token] def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> sentencepiece.SentencePieceProcessor: _a : int = sentencepiece.SentencePieceProcessor(**lowerCAmelCase_ ) spm.Load(str(lowerCAmelCase_ ) ) return spm def __lowerCamelCase ( lowerCAmelCase_ ) -> Union[Dict, List]: with open(lowerCAmelCase_ , 'r' ) as f: return json.load(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> None: with open(lowerCAmelCase_ , 'w' ) as f: json.dump(lowerCAmelCase_ , lowerCAmelCase_ , indent=2 )
89
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _snake_case = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } _snake_case = { '169M': 768, '430M': 1024, '1B5': 2048, '3B': 2560, '7B': 4096, '14B': 5120, } def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : int = list(state_dict.keys() ) for name in state_dict_keys: _a : str = state_dict.pop(UpperCamelCase__ ) # emb -> embedding if name.startswith("""emb.""" ): _a : Dict = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): _a : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention _a : Any = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCamelCase__ ) # ffn -> feed_forward _a : int = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCamelCase__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): _a : List[str] = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): _a : Tuple = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): _a : Dict = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": _a : Optional[int] = """rwkv.""" + name _a : Any = weight return state_dict def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=None ): '''simple docstring''' # 1. If possible, build the tokenizer. if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) _a : Tuple = 5_0_2_7_7 _a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: _a : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase__ ) _a : int = len(UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) # 2. Build the config _a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: _a : Tuple = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) _a : List[Any] = RwkvConfig( vocab_size=UpperCamelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(UpperCamelCase__ ) # 3. Download model file then convert state_dict _a : str = hf_hub_download(UpperCamelCase__ , UpperCamelCase__ ) _a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : List[str] = convert_state_dict(UpperCamelCase__ ) # 4. Split in shards and save _a , _a : List[str] = shard_checkpoint(UpperCamelCase__ ) for shard_file, shard in shards.items(): torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if index is not None: _a : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) # Save the index as well with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f: _a : Dict = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n""" f.write(UpperCamelCase__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" ) _a : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: _a : Any = torch.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) _a : Dict = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ , max_shard_size="""2GB""" ) tokenizer.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) _snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
294
0
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ): """simple docstring""" snake_case_ = KandinskyVaaControlnetPipeline snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] snake_case_ = ['''image_embeds''', '''negative_image_embeds''', '''hint'''] snake_case_ = [ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] snake_case_ = False @property def lowercase_ ( self ) -> str: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' return 32 @property def lowercase_ ( self ) -> Tuple: '''simple docstring''' return self.time_input_dim @property def lowercase_ ( self ) -> str: '''simple docstring''' return self.time_input_dim * 4 @property def lowercase_ ( self ) -> List[Any]: '''simple docstring''' return 100 @property def lowercase_ ( self ) -> Any: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCamelCase = UNetaDConditionModel(**lowerCamelCase__ ) return model @property def lowercase_ ( self ) -> Union[str, Any]: '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowercase_ ( self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) __lowerCamelCase = VQModel(**self.dummy_movq_kwargs ) return model def lowercase_ ( self ) -> Optional[int]: '''simple docstring''' __lowerCamelCase = self.dummy_unet __lowerCamelCase = self.dummy_movq __lowerCamelCase = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type='epsilon' , thresholding=lowerCamelCase__ , ) __lowerCamelCase = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> Union[str, Any]: '''simple docstring''' __lowerCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) __lowerCamelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCamelCase__ ) # create hint __lowerCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ ) if str(lowerCamelCase__ ).startswith('mps' ): __lowerCamelCase = torch.manual_seed(lowerCamelCase__ ) else: __lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ ) __lowerCamelCase = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 64, 'width': 64, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def lowercase_ ( self ) -> str: '''simple docstring''' __lowerCamelCase = 'cpu' __lowerCamelCase = self.get_dummy_components() __lowerCamelCase = self.pipeline_class(**lowerCamelCase__ ) __lowerCamelCase = pipe.to(lowerCamelCase__ ) pipe.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) ) __lowerCamelCase = output.images __lowerCamelCase = pipe( **self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0] __lowerCamelCase = image[0, -3:, -3:, -1] __lowerCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCamelCase = np.array( [0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def lowercase_ ( self ) -> List[Any]: '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def lowercase_ ( self ) -> Any: '''simple docstring''' __lowerCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' ) __lowerCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png' ) __lowerCamelCase = torch.from_numpy(np.array(lowerCamelCase__ ) ).float() / 2_55.0 __lowerCamelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __lowerCamelCase = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(lowerCamelCase__ ) __lowerCamelCase = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.floataa ) __lowerCamelCase = pipeline.to(lowerCamelCase__ ) pipeline.set_progress_bar_config(disable=lowerCamelCase__ ) __lowerCamelCase = 'A robot, 4k photo' __lowerCamelCase = torch.Generator(device='cuda' ).manual_seed(0 ) __lowerCamelCase , __lowerCamelCase = pipe_prior( lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __lowerCamelCase = torch.Generator(device='cuda' ).manual_seed(0 ) __lowerCamelCase = pipeline( image_embeds=lowerCamelCase__ , negative_image_embeds=lowerCamelCase__ , hint=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=100 , output_type='np' , ) __lowerCamelCase = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
90
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> List[str]: _a : Any = """laion/clap-htsat-unfused""" _a : Union[str, Any] = tempfile.mkdtemp() def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict: return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : List[str] ) -> Optional[int]: _a : List[str] = self.get_tokenizer() _a : Any = self.get_feature_extractor() _a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) _a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> Optional[int]: _a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> Optional[Any]: _a : Optional[int] = self.get_feature_extractor() _a : Tuple = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = floats_list((3, 1000) ) _a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Tuple ) -> Optional[int]: _a : List[str] = self.get_feature_extractor() _a : Any = self.get_tokenizer() _a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Optional[int] = """This is a test string""" _a : Tuple = processor(text=UpperCAmelCase__ ) _a : int = tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : List[Any] ) -> Any: _a : str = self.get_feature_extractor() _a : List[str] = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : Dict = processor.batch_decode(UpperCAmelCase__ ) _a : Any = tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> List[str]: _a : str = self.get_feature_extractor() _a : Optional[Any] = self.get_tokenizer() _a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
294
0
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np UpperCAmelCase_ : int = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 UpperCAmelCase_ : str = typing.Union[np.floataa, int, float] # noqa: UP007 def _A (__a , __a ) -> VectorOut: """simple docstring""" return np.sqrt(np.sum((np.asarray(__a ) - np.asarray(__a )) ** 2 ) ) def _A (__a , __a ) -> VectorOut: """simple docstring""" return sum((va - va) ** 2 for va, va in zip(__a , __a ) ) ** (1 / 2) if __name__ == "__main__": def _A () -> None: """simple docstring""" from timeit import timeit print('''Without Numpy''' ) print( timeit( '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) ) print('''With Numpy''' ) print( timeit( '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) ) benchmark()
91
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _snake_case = logging.get_logger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
294
0
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any ): return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2 def _a ( SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0 ): return sorted(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : x[column] ) def _a ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[Any]=float("inf" ) ): for i in range(points_counts - 1 ): for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __lowerCAmelCase = current_dis return min_dis def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=float("inf" ) ): for i in range(min(6 , points_counts - 1 ) , SCREAMING_SNAKE_CASE_ ): for j in range(max(0 , i - 6 ) , SCREAMING_SNAKE_CASE_ ): __lowerCAmelCase = euclidean_distance_sqr(points[i] , points[j] ) if current_dis < min_dis: __lowerCAmelCase = current_dis return min_dis def _a ( SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] ): # base case if points_counts <= 3: return dis_between_closest_pair(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # recursion __lowerCAmelCase = points_counts // 2 __lowerCAmelCase = closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE_ , points_sorted_on_y[:mid] , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE_ , points_sorted_on_y[mid:] , points_counts - mid ) __lowerCAmelCase = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = [] for point in points_sorted_on_x: if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis: cross_strip.append(SCREAMING_SNAKE_CASE_ ) __lowerCAmelCase = dis_between_closest_in_strip( SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) return min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : int ): __lowerCAmelCase = column_based_sort(SCREAMING_SNAKE_CASE_ , column=0 ) __lowerCAmelCase = column_based_sort(SCREAMING_SNAKE_CASE_ , column=1 ) return ( closest_pair_of_points_sqr( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) ** 0.5 if __name__ == "__main__": UpperCamelCase__ = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)] print("""Distance:""", closest_pair_of_points(points, len(points)))
92
"""simple docstring""" import unittest import numpy as np def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ): '''simple docstring''' _a : List[Any] = np.shape(UpperCamelCase__ ) _a : Any = np.shape(UpperCamelCase__ ) _a : Union[str, Any] = np.shape(UpperCamelCase__ ) if shape_a[0] != shape_b[0]: _a : int = ( """Expected the same number of rows for A and B. """ F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(UpperCamelCase__ ) if shape_b[1] != shape_c[1]: _a : Tuple = ( """Expected the same number of columns for B and C. """ F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(UpperCamelCase__ ) _a : int = pseudo_inv if a_inv is None: try: _a : Optional[int] = np.linalg.inv(UpperCamelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> None: _a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Optional[int] = np.array([[2, 1], [6, 3]] ) _a : Optional[Any] = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) _a : Union[str, Any] = np.block([[a, b], [b.T, c]] ) _a : int = np.linalg.det(UpperCAmelCase__ ) _a : Union[str, Any] = np.linalg.det(UpperCAmelCase__ ) _a : List[Any] = np.linalg.det(UpperCAmelCase__ ) self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s ) def _lowercase ( self : int ) -> None: _a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> None: _a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Dict = np.array([[0, 3], [3, 0], [2, 3]] ) _a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
294
0
'''simple docstring''' def snake_case_ ( __SCREAMING_SNAKE_CASE : Optional[int] ): """simple docstring""" lowercase_ : List[str] = [0] * len(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = [] lowercase_ : int = [] lowercase_ : Optional[int] = 0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(__SCREAMING_SNAKE_CASE ) ): if indegree[i] == 0: queue.append(__SCREAMING_SNAKE_CASE ) while queue: lowercase_ : List[str] = queue.pop(0 ) cnt += 1 topo.append(__SCREAMING_SNAKE_CASE ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(__SCREAMING_SNAKE_CASE ) if cnt != len(__SCREAMING_SNAKE_CASE ): print('''Cycle exists''' ) else: print(__SCREAMING_SNAKE_CASE ) # Adjacency List of Graph _lowercase : Dict = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
93
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = torch.device('cpu') def lowerCAmelCase__ ( ): '''simple docstring''' _a : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" _a : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Any = dct.pop(UpperCamelCase__ ) _a : Dict = val def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : Tuple = [] for k in state_dict.keys(): _a : Any = k if ".pwconv" in k: _a : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _a : List[str] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: _a : Optional[int] = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _a : Tuple = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _a : int = k_new.split(""".""" ) if ls[2].isdigit(): _a : Union[str, Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _a : Tuple = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _a : Optional[int] = 1_0_0_0 _a : Optional[Any] = """huggingface/label-files""" _a : Optional[Any] = """imagenet-1k-id2label.json""" _a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) _a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _a : Dict = idalabel _a : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _a : Any = [3, 3, 6, 4] _a : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": _a : Any = [3, 3, 9, 6] _a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": _a : List[Any] = [4, 3, 1_0, 5] _a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": _a : List[Any] = [4, 4, 1_2, 6] _a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ ) else: _a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : int = checkpoint _a : Optional[Any] = create_rename_keys(UpperCamelCase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # load HuggingFace model _a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval() hf_model.load_state_dict(UpperCamelCase__ ) # prepare test inputs _a : Any = prepare_img() _a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" ) # compare outputs from both models _a : Dict = get_expected_output(UpperCamelCase__ ) _a : int = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') _snake_case = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
294
0
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu snake_case : str = get_tests_dir() + '''/test_data/fsmt/fsmt_val_data.json''' with io.open(filename, '''r''', encoding='''utf-8''') as f: snake_case : Union[str, Any] = json.load(f) @require_torch class _snake_case ( unittest.TestCase ): def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): return FSMTTokenizer.from_pretrained(_lowerCamelCase ) def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ): a :Tuple = FSMTForConditionalGeneration.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['''en-ru''', 26.0], ['''ru-en''', 22.0], ['''en-de''', 22.0], ['''de-en''', 29.0], ] ) @slow def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ): # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality a :Tuple = F'''facebook/wmt19-{pair}''' a :Optional[Any] = self.get_tokenizer(_lowerCamelCase ) a :str = self.get_model(_lowerCamelCase ) a :Optional[int] = bleu_data[pair]['''src'''] a :Optional[Any] = bleu_data[pair]['''tgt'''] a :str = tokenizer(_lowerCamelCase , return_tensors='''pt''' , truncation=_lowerCamelCase , padding='''longest''' ).to(_lowerCamelCase ) a :Optional[Any] = model.generate( input_ids=batch.input_ids , num_beams=8 , ) a :str = tokenizer.batch_decode( _lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase ) a :int = calculate_bleu(_lowerCamelCase , _lowerCamelCase ) print(_lowerCamelCase ) self.assertGreaterEqual(scores['''bleu'''] , _lowerCamelCase )
94
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['PerceiverFeatureExtractor'] _snake_case = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
from __future__ import annotations class __lowerCAmelCase : def __init__( self , lowerCAmelCase__ ) -> int: '''simple docstring''' a__ : Optional[Any] =TypeError( "Matrices must be formed from a list of zero or more lists containing at " "least one and the same number of values, each of which must be of type " "int or float." ) if len(lowerCAmelCase__ ) != 0: a__ : str =len(rows[0] ) if cols == 0: raise error for row in rows: if len(lowerCAmelCase__ ) != cols: raise error for value in row: if not isinstance(lowerCAmelCase__ , (int, float) ): raise error a__ : Union[str, Any] =rows else: a__ : List[Any] =[] def _lowercase ( self ) -> list[list[int]]: '''simple docstring''' return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )] @property def _lowercase ( self ) -> int: '''simple docstring''' return len(self.rows ) @property def _lowercase ( self ) -> int: '''simple docstring''' return len(self.rows[0] ) @property def _lowercase ( self ) -> tuple[int, int]: '''simple docstring''' return (self.num_rows, self.num_columns) @property def _lowercase ( self ) -> bool: '''simple docstring''' return self.order[0] == self.order[1] def _lowercase ( self ) -> Matrix: '''simple docstring''' a__ : Any =[ [0 if column_num != row_num else 1 for column_num in range(self.num_rows )] for row_num in range(self.num_rows ) ] return Matrix(lowerCAmelCase__ ) def _lowercase ( self ) -> int: '''simple docstring''' if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0] ) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0]) ) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns ) ) def _lowercase ( self ) -> bool: '''simple docstring''' return bool(self.determinant() ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' a__ : int =[ [ self.rows[other_row][other_column] for other_column in range(self.num_columns ) if other_column != column ] for other_row in range(self.num_rows ) if other_row != row ] return Matrix(lowerCAmelCase__ ).determinant() def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' if (row + column) % 2 == 0: return self.get_minor(lowerCAmelCase__ , lowerCAmelCase__ ) return -1 * self.get_minor(lowerCAmelCase__ , lowerCAmelCase__ ) def _lowercase ( self ) -> Matrix: '''simple docstring''' return Matrix( [ [self.get_minor(lowerCAmelCase__ , lowerCAmelCase__ ) for column in range(self.num_columns )] for row in range(self.num_rows ) ] ) def _lowercase ( self ) -> Matrix: '''simple docstring''' return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns ) ] for row in range(self.minors().num_rows ) ] ) def _lowercase ( self ) -> Matrix: '''simple docstring''' a__ : Tuple =[ [self.cofactors().rows[column][row] for column in range(self.num_columns )] for row in range(self.num_rows ) ] return Matrix(lowerCAmelCase__ ) def _lowercase ( self ) -> Matrix: '''simple docstring''' a__ : List[Any] =self.determinant() if not determinant: raise TypeError("Only matrices with a non-zero determinant have an inverse" ) return self.adjugate() * (1 / determinant) def __repr__( self ) -> str: '''simple docstring''' return str(self.rows ) def __str__( self ) -> str: '''simple docstring''' if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0] ) ) + "]]" return ( "[" + "\n ".join( [ "[" + ". ".join([str(lowerCAmelCase__ ) for value in row] ) + ".]" for row in self.rows ] ) + "]" ) def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> None: '''simple docstring''' a__ : Union[str, Any] =TypeError("Row must be a list containing all ints and/or floats" ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise type_error for value in row: if not isinstance(lowerCAmelCase__ , (int, float) ): raise type_error if len(lowerCAmelCase__ ) != self.num_columns: raise ValueError( "Row must be equal in length to the other rows in the matrix" ) if position is None: self.rows.append(lowerCAmelCase__ ) else: a__ : Optional[int] =self.rows[0:position] + [row] + self.rows[position:] def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> None: '''simple docstring''' a__ : Tuple =TypeError( "Column must be a list containing all ints and/or floats" ) if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise type_error for value in column: if not isinstance(lowerCAmelCase__ , (int, float) ): raise type_error if len(lowerCAmelCase__ ) != self.num_rows: raise ValueError( "Column must be equal in length to the other columns in the matrix" ) if position is None: a__ : Union[str, Any] =[self.rows[i] + [column[i]] for i in range(self.num_rows )] else: a__ : Dict =[ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows ) ] def __eq__( self , lowerCAmelCase__ ) -> bool: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): return NotImplemented return self.rows == other.rows def __ne__( self , lowerCAmelCase__ ) -> bool: '''simple docstring''' return not self == other def __neg__( self ) -> Matrix: '''simple docstring''' return self * -1 def __add__( self , lowerCAmelCase__ ) -> Matrix: '''simple docstring''' if self.order != other.order: raise ValueError("Addition requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __sub__( self , lowerCAmelCase__ ) -> Matrix: '''simple docstring''' if self.order != other.order: raise ValueError("Subtraction requires matrices of the same order" ) return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )] for i in range(self.num_rows ) ] ) def __mul__( self , lowerCAmelCase__ ) -> Matrix: '''simple docstring''' if isinstance(lowerCAmelCase__ , (int, float) ): return Matrix( [[int(element * other ) for element in row] for row in self.rows] ) elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): if self.num_columns != other.num_rows: raise ValueError( "The number of columns in the first matrix must " "be equal to the number of rows in the second" ) return Matrix( [ [Matrix.dot_product(lowerCAmelCase__ , lowerCAmelCase__ ) for column in other.columns()] for row in self.rows ] ) else: raise TypeError( "A Matrix can only be multiplied by an int, float, or another matrix" ) def __pow__( self , lowerCAmelCase__ ) -> Matrix: '''simple docstring''' if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): raise TypeError("A Matrix can only be raised to the power of an int" ) if not self.is_square: raise ValueError("Only square matrices can be raised to a power" ) if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( "Only invertable matrices can be raised to a negative power" ) a__ : List[str] =self for _ in range(other - 1 ): result *= self return result @classmethod def _lowercase ( cls , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: '''simple docstring''' return sum(row[i] * column[i] for i in range(len(lowerCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
95
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict: if k in (0.0_4, 0.0_6): _a : List[str] = k _a : List[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Dict ) -> str: return str(self.k ) def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]: _a : Dict = cva.imread(UpperCAmelCase__ , 0 ) _a , _a : List[Any] = img.shape _a : list[list[int]] = [] _a : List[Any] = img.copy() _a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB ) _a , _a : Any = np.gradient(UpperCAmelCase__ ) _a : Tuple = dx**2 _a : Union[str, Any] = dy**2 _a : Union[str, Any] = dx * dy _a : int = 0.0_4 _a : List[str] = self.window_size // 2 for y in range(UpperCAmelCase__ , h - offset ): for x in range(UpperCAmelCase__ , w - offset ): _a : str = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : List[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Tuple = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Any = (wxx * wyy) - (wxy**2) _a : Tuple = wxx + wyy _a : Any = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _snake_case = HarrisCorner(0.04, 3) _snake_case , _snake_case = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
294
0
"""simple docstring""" import argparse import torch from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert from transformers.utils import logging logging.set_verbosity_info() def _snake_case ( lowercase__ , lowercase__ , lowercase__ ): # Initialise PyTorch model _lowerCamelCase : Dict = RemBertConfig.from_json_file(lowercase__ ) print('Building PyTorch model from configuration: {}'.format(str(lowercase__ ) ) ) _lowerCamelCase : Any = RemBertModel(lowercase__ ) # Load weights from tf checkpoint load_tf_weights_in_rembert(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model print('Save PyTorch model to {}'.format(lowercase__ ) ) torch.save(model.state_dict() , lowercase__ ) if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--rembert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained RemBERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowercase__ = parser.parse_args() convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
96
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' # Check if the input is valid if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients _a , _a , _a : Any = equationa _a , _a , _a : Tuple = equationa # Calculate the determinants of the matrices _a : int = aa * ba - aa * ba _a : str = ca * ba - ca * ba _a : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _a : Dict = determinant_x / determinant _a : str = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
294
0
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class lowercase ( unittest.TestCase ): """simple docstring""" def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=True , UpperCamelCase_=1 / 255 , UpperCamelCase_=True , ): '''simple docstring''' UpperCamelCase__ :Dict = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333} UpperCamelCase__ :str = parent UpperCamelCase__ :List[Any] = batch_size UpperCamelCase__ :Dict = num_channels UpperCamelCase__ :str = min_resolution UpperCamelCase__ :Optional[Any] = max_resolution UpperCamelCase__ :int = do_resize UpperCamelCase__ :Optional[Any] = size UpperCamelCase__ :Tuple = do_normalize UpperCamelCase__ :List[Any] = image_mean UpperCamelCase__ :Dict = image_std UpperCamelCase__ :Union[str, Any] = do_rescale UpperCamelCase__ :Union[str, Any] = rescale_factor UpperCamelCase__ :Union[str, Any] = do_pad def lowerCAmelCase__ ( self ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_=False ): '''simple docstring''' if not batched: UpperCamelCase__ :List[str] = image_inputs[0] if isinstance(UpperCamelCase_ , Image.Image ): UpperCamelCase__ , UpperCamelCase__ :List[str] = image.size else: UpperCamelCase__ , UpperCamelCase__ :List[Any] = image.shape[1], image.shape[2] if w < h: UpperCamelCase__ :int = int(self.size['''shortest_edge'''] * h / w ) UpperCamelCase__ :Dict = self.size['''shortest_edge'''] elif w > h: UpperCamelCase__ :int = self.size['''shortest_edge'''] UpperCamelCase__ :Tuple = int(self.size['''shortest_edge'''] * w / h ) else: UpperCamelCase__ :str = self.size['''shortest_edge'''] UpperCamelCase__ :str = self.size['''shortest_edge'''] else: UpperCamelCase__ :Any = [] for image in image_inputs: UpperCamelCase__ , UpperCamelCase__ :Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCamelCase__ :List[Any] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[0] )[0] UpperCamelCase__ :Optional[int] = max(UpperCamelCase_ , key=lambda UpperCamelCase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowercase ( A__ , unittest.TestCase ): """simple docstring""" _a = ConditionalDetrImageProcessor if is_vision_available() else None def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessingTester(self ) @property def lowerCAmelCase__ ( self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) UpperCamelCase__ :List[str] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase_ ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase_ ) def lowerCAmelCase__ ( self ): '''simple docstring''' pass def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCamelCase__ :List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input UpperCamelCase__ :Any = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input UpperCamelCase__ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :List[Any] = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ :Union[str, Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :str = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCamelCase__ :Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input UpperCamelCase__ :str = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :Dict = self.image_processor_tester.get_expected_values(UpperCamelCase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCamelCase__ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values UpperCamelCase__ , UpperCamelCase__ :Optional[int] = self.image_processor_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ :Optional[int] = json.loads(f.read() ) UpperCamelCase__ :Any = {'''image_id''': 39769, '''annotations''': target} # encode them UpperCamelCase__ :str = ConditionalDetrImageProcessor.from_pretrained('''microsoft/conditional-detr-resnet-50''' ) UpperCamelCase__ :List[Any] = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ :List[str] = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ ) UpperCamelCase__ :str = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) ) # verify area UpperCamelCase__ :str = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) ) # verify boxes UpperCamelCase__ :Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ ) UpperCamelCase__ :Optional[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) ) # verify image_id UpperCamelCase__ :List[Any] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) ) # verify is_crowd UpperCamelCase__ :int = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) ) # verify class_labels UpperCamelCase__ :List[str] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) ) # verify orig_size UpperCamelCase__ :Tuple = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) ) # verify size UpperCamelCase__ :Union[str, Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) ) @slow def lowerCAmelCase__ ( self ): '''simple docstring''' UpperCamelCase__ :List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCamelCase__ :Tuple = json.loads(f.read() ) UpperCamelCase__ :List[str] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target} UpperCamelCase__ :Any = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCamelCase__ :List[Any] = ConditionalDetrImageProcessor(format='''coco_panoptic''' ) UpperCamelCase__ :Dict = image_processing(images=UpperCamelCase_ , annotations=UpperCamelCase_ , masks_path=UpperCamelCase_ , return_tensors='''pt''' ) # verify pixel values UpperCamelCase__ :str = torch.Size([1, 3, 800, 1066] ) self.assertEqual(encoding['''pixel_values'''].shape , UpperCamelCase_ ) UpperCamelCase__ :Optional[int] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCamelCase_ , atol=1e-4 ) ) # verify area UpperCamelCase__ :Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCamelCase_ ) ) # verify boxes UpperCamelCase__ :Any = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCamelCase_ ) UpperCamelCase__ :List[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCamelCase_ , atol=1e-3 ) ) # verify image_id UpperCamelCase__ :List[str] = torch.tensor([39769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCamelCase_ ) ) # verify is_crowd UpperCamelCase__ :Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCamelCase_ ) ) # verify class_labels UpperCamelCase__ :str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCamelCase_ ) ) # verify masks UpperCamelCase__ :Optional[Any] = 822873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCamelCase_ ) # verify orig_size UpperCamelCase__ :List[str] = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCamelCase_ ) ) # verify size UpperCamelCase__ :List[Any] = torch.tensor([800, 1066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCamelCase_ ) )
97
"""simple docstring""" _snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}] _snake_case = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
294
0
"""simple docstring""" from __future__ import annotations import typing from collections import Counter def a_ ( lowerCamelCase ): UpperCAmelCase__ = Counter() for base in range(1 , max_perimeter + 1 ): for perpendicular in range(lowerCamelCase , max_perimeter + 1 ): UpperCAmelCase__ = (base * base + perpendicular * perpendicular) ** 0.5 if hypotenuse == int(lowerCamelCase ): UpperCAmelCase__ = int(base + perpendicular + hypotenuse ) if perimeter > max_perimeter: continue triplets[perimeter] += 1 return triplets def a_ ( lowerCamelCase = 1_0_0_0 ): UpperCAmelCase__ = pythagorean_triple(lowerCamelCase ) return triplets.most_common(1 )[0][0] if __name__ == "__main__": print(F"""Perimeter {solution()} has maximum solutions""")
98
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = '''mvp''' UpperCamelCase : Union[str, Any] = ['''past_key_values'''] UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]: _a : Any = vocab_size _a : Any = max_position_embeddings _a : Union[str, Any] = d_model _a : List[str] = encoder_ffn_dim _a : List[Any] = encoder_layers _a : Dict = encoder_attention_heads _a : Tuple = decoder_ffn_dim _a : List[Any] = decoder_layers _a : Optional[Any] = decoder_attention_heads _a : Optional[Any] = dropout _a : str = attention_dropout _a : Dict = activation_dropout _a : Any = activation_function _a : Tuple = init_std _a : Dict = encoder_layerdrop _a : Optional[int] = decoder_layerdrop _a : Optional[Any] = classifier_dropout _a : List[Any] = use_cache _a : Dict = encoder_layers _a : str = scale_embedding # scale factor will be sqrt(d_model) if True _a : int = use_prompt _a : Dict = prompt_length _a : Dict = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ): _a : List[str] = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ """The config can simply be saved and uploaded again to be fixed.""" )
294
0
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowercase : str = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. lowercase : Tuple = direct_transformers_import(PATH_TO_TRANSFORMERS) lowercase : List[str] = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowercase : Optional[Any] = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""") lowercase : Optional[Any] = { """DecisionTransformerConfig""", """EncoderDecoderConfig""", """MusicgenConfig""", """RagConfig""", """SpeechEncoderDecoderConfig""", """TimmBackboneConfig""", """VisionEncoderDecoderConfig""", """VisionTextDualEncoderConfig""", """LlamaConfig""", } def A_ ( A__ ) -> Optional[Any]: a__ : List[str] = None # source code of `config_class` a__ : Tuple = inspect.getsource(A__ ) a__ : Any = _re_checkpoint.findall(A__ ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith('/' ): a__ : Union[str, Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link a__ : str = F'https://huggingface.co/{ckpt_name}' if ckpt_link == ckpt_link_from_name: a__ : Optional[int] = ckpt_name break return checkpoint def A_ ( ) -> List[str]: a__ : Tuple = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue a__ : Dict = get_checkpoint_from_config_class(A__ ) a__ : List[str] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(A__ ) if len(A__ ) > 0: a__ : Dict = '\n'.join(sorted(A__ ) ) raise ValueError(F'The following configurations don\'t contain any valid checkpoint:\n{message}' ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
99
"""simple docstring""" import logging import os from typing import List, TextIO, Union from conllu import parse_incr from utils_ner import InputExample, Split, TokenClassificationTask _snake_case = logging.getLogger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Optional[Any] , UpperCAmelCase__ : Optional[int]=-1 ) -> Tuple: # in NER datasets, the last column is usually reserved for NER label _a : Optional[int] = label_idx def _lowercase ( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : Any = mode.value _a : Optional[int] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : int = 1 _a : int = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: _a : str = [] _a : str = [] for line in f: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 _a : List[str] = [] _a : str = [] else: _a : List[Any] = line.split(""" """ ) words.append(splits[0] ) if len(UpperCAmelCase__ ) > 1: labels.append(splits[self.label_idx].replace("""\n""" , """""" ) ) else: # Examples could have no label for mode = "test" labels.append("""O""" ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) return examples def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Union[str, Any]: _a : List[str] = 0 for line in test_input_reader: if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n": writer.write(UpperCAmelCase__ ) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: _a : int = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n""" writer.write(UpperCAmelCase__ ) else: logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : List[Any] = f.read().splitlines() if "O" not in labels: _a : Union[str, Any] = ["""O"""] + labels return labels else: return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] class UpperCamelCase ( snake_case_ ): def __init__( self : Union[str, Any] ) -> List[str]: # in CONLL2003 dataset chunk column is second-to-last super().__init__(label_idx=-2 ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: _a : Optional[int] = f.read().splitlines() if "O" not in labels: _a : Optional[Any] = ["""O"""] + labels return labels else: return [ "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP", "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP", ] class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[Split, str] ) -> List[InputExample]: if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): _a : List[Any] = mode.value _a : Union[str, Any] = os.path.join(UpperCAmelCase__ , f"""{mode}.txt""" ) _a : List[str] = 1 _a : Optional[Any] = [] with open(UpperCAmelCase__ , encoding="""utf-8""" ) as f: for sentence in parse_incr(UpperCAmelCase__ ): _a : List[Any] = [] _a : Any = [] for token in sentence: words.append(token["""form"""] ) labels.append(token["""upos"""] ) assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ) if words: examples.append(InputExample(guid=f"""{mode}-{guid_index}""" , words=UpperCAmelCase__ , labels=UpperCAmelCase__ ) ) guid_index += 1 return examples def _lowercase ( self : Tuple , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : TextIO , UpperCAmelCase__ : List ) -> Dict: _a : Optional[Any] = 0 for sentence in parse_incr(UpperCAmelCase__ ): _a : List[str] = preds_list[example_id] _a : str = """""" for token in sentence: out += f"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """ out += "\n" writer.write(UpperCAmelCase__ ) example_id += 1 def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: if path: with open(UpperCAmelCase__ , """r""" ) as f: return f.read().splitlines() else: return [ "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X", ]
294
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import ( BaseOutput, OptionalDependencyNotAvailable, is_flax_available, is_k_diffusion_available, is_k_diffusion_version, is_onnx_available, is_torch_available, is_transformers_available, is_transformers_version, ) @dataclass class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : Union[List[PIL.Image.Image], np.ndarray] __lowercase : Optional[List[bool]] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_cycle_diffusion import CycleDiffusionPipeline from .pipeline_stable_diffusion import StableDiffusionPipeline from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline from .pipeline_stable_unclip import StableUnCLIPPipeline from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline from .safety_checker import StableDiffusionSafetyChecker from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline else: from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionPixaPixZeroPipeline, ) else: from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline try: if not ( is_torch_available() and is_transformers_available() and is_k_diffusion_available() and is_k_diffusion_version(">=", "0.0.12") ): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline try: if not (is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_onnx_objects import * # noqa F403 else: from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline if is_transformers_available() and is_flax_available(): import flax @flax.struct.dataclass class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" __lowercase : np.ndarray __lowercase : List[bool] from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
100
"""simple docstring""" from __future__ import annotations import time import numpy as np _snake_case = [8, 5, 9, 7] _snake_case = [ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] _snake_case = [ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class UpperCamelCase : def __init__( self : List[Any] , UpperCAmelCase__ : list[int] , UpperCAmelCase__ : list[list[int]] , UpperCAmelCase__ : list[list[int]] , ) -> None: _a : List[str] = claim_vector _a : List[Any] = allocated_resources_table _a : Union[str, Any] = maximum_claim_table def _lowercase ( self : Tuple ) -> list[int]: return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _lowercase ( self : int ) -> list[int]: return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _lowercase ( self : List[str] ) -> list[list[int]]: return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(UpperCAmelCase__ ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _lowercase ( self : Optional[Any] ) -> dict[int, list[int]]: return {self.__need().index(UpperCAmelCase__ ): i for i in self.__need()} def _lowercase ( self : Dict , **UpperCAmelCase__ : Optional[Any] ) -> None: _a : List[Any] = self.__need() _a : Optional[int] = self.__allocated_resources_table _a : str = self.__available_resources() _a : Optional[Any] = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print("""_""" * 50 + """\n""" ) while need_list: _a : int = False for each_need in need_list: _a : Optional[int] = True for index, need in enumerate(UpperCAmelCase__ ): if need > available_resources[index]: _a : List[Any] = False break if execution: _a : str = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: _a : Any = original_need_index print(f"""Process {process_number + 1} is executing.""" ) # remove the process run from stack need_list.remove(UpperCAmelCase__ ) # update available/freed resources stack _a : Union[str, Any] = np.array(UpperCAmelCase__ ) + np.array( alloc_resources_table[process_number] ) print( """Updated available resource stack for processes: """ + """ """.join([str(UpperCAmelCase__ ) for x in available_resources] ) ) break if safe: print("""The process is in a safe state.\n""" ) else: print("""System in unsafe state. Aborting...\n""" ) break def _lowercase ( self : Any ) -> Optional[int]: print(""" """ * 9 + """Allocated Resource Table""" ) for item in self.__allocated_resources_table: print( f"""P{self.__allocated_resources_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print(""" """ * 9 + """System Resource Table""" ) for item in self.__maximum_claim_table: print( f"""P{self.__maximum_claim_table.index(UpperCAmelCase__ ) + 1}""" + """ """.join(f"""{it:>8}""" for it in item ) + """\n""" ) print( """Current Usage by Active Processes: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__claim_vector ) ) print( """Initial Available Resources: """ + """ """.join(str(UpperCAmelCase__ ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
294
0
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bert import BertTokenizer lowercase__ :List[Any] = logging.get_logger(__name__) lowercase__ :Optional[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} lowercase__ :Union[str, Any] = { "vocab_file": { "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt", "bert-base-multilingual-uncased": ( "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt" ), "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt", "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt", "bert-large-uncased-whole-word-masking": ( "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt" ), "bert-large-cased-whole-word-masking": ( "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt" ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt" ), "bert-base-cased-finetuned-mrpc": ( "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt" ), "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt", "bert-base-german-dbmdz-uncased": ( "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt" ), "wietsedv/bert-base-dutch-cased": ( "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json", "bert-base-multilingual-uncased": ( "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json" ), "bert-base-multilingual-cased": ( "https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json" ), "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json", "bert-large-uncased-whole-word-masking": ( "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json" ), "bert-large-cased-whole-word-masking": ( "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json" ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json" ), "bert-large-cased-whole-word-masking-finetuned-squad": ( "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json" ), "bert-base-cased-finetuned-mrpc": ( "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json" ), "bert-base-german-dbmdz-cased": ( "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json" ), "bert-base-german-dbmdz-uncased": ( "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json" ), "TurkuNLP/bert-base-finnish-cased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json" ), "TurkuNLP/bert-base-finnish-uncased-v1": ( "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json" ), "wietsedv/bert-base-dutch-cased": ( "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json" ), }, } lowercase__ :str = { "bert-base-uncased": 512, "bert-large-uncased": 512, "bert-base-cased": 512, "bert-large-cased": 512, "bert-base-multilingual-uncased": 512, "bert-base-multilingual-cased": 512, "bert-base-chinese": 512, "bert-base-german-cased": 512, "bert-large-uncased-whole-word-masking": 512, "bert-large-cased-whole-word-masking": 512, "bert-large-uncased-whole-word-masking-finetuned-squad": 512, "bert-large-cased-whole-word-masking-finetuned-squad": 512, "bert-base-cased-finetuned-mrpc": 512, "bert-base-german-dbmdz-cased": 512, "bert-base-german-dbmdz-uncased": 512, "TurkuNLP/bert-base-finnish-cased-v1": 512, "TurkuNLP/bert-base-finnish-uncased-v1": 512, "wietsedv/bert-base-dutch-cased": 512, } lowercase__ :Dict = { "bert-base-uncased": {"do_lower_case": True}, "bert-large-uncased": {"do_lower_case": True}, "bert-base-cased": {"do_lower_case": False}, "bert-large-cased": {"do_lower_case": False}, "bert-base-multilingual-uncased": {"do_lower_case": True}, "bert-base-multilingual-cased": {"do_lower_case": False}, "bert-base-chinese": {"do_lower_case": False}, "bert-base-german-cased": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking": {"do_lower_case": True}, "bert-large-cased-whole-word-masking": {"do_lower_case": False}, "bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True}, "bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False}, "bert-base-cased-finetuned-mrpc": {"do_lower_case": False}, "bert-base-german-dbmdz-cased": {"do_lower_case": False}, "bert-base-german-dbmdz-uncased": {"do_lower_case": True}, "TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False}, "TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True}, "wietsedv/bert-base-dutch-cased": {"do_lower_case": False}, } class lowercase ( SCREAMING_SNAKE_CASE__ ): lowercase_ : str =VOCAB_FILES_NAMES lowercase_ : Dict =PRETRAINED_VOCAB_FILES_MAP lowercase_ : Optional[int] =PRETRAINED_INIT_CONFIGURATION lowercase_ : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase_ : Any =BertTokenizer def __init__( self ,A__=None ,A__=None ,A__=True ,A__="[UNK]" ,A__="[SEP]" ,A__="[PAD]" ,A__="[CLS]" ,A__="[MASK]" ,A__=True ,A__=None ,**A__ ,): super().__init__( A__ ,tokenizer_file=A__ ,do_lower_case=A__ ,unk_token=A__ ,sep_token=A__ ,pad_token=A__ ,cls_token=A__ ,mask_token=A__ ,tokenize_chinese_chars=A__ ,strip_accents=A__ ,**A__ ,) lowercase = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('''lowercase''' ,A__) != do_lower_case or normalizer_state.get('''strip_accents''' ,A__) != strip_accents or normalizer_state.get('''handle_chinese_chars''' ,A__) != tokenize_chinese_chars ): lowercase = getattr(A__ ,normalizer_state.pop('''type''')) lowercase = do_lower_case lowercase = strip_accents lowercase = tokenize_chinese_chars lowercase = normalizer_class(**A__) lowercase = do_lower_case def A__ ( self ,A__ ,A__=None): lowercase = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def A__ ( self ,A__ ,A__ = None): lowercase = [self.sep_token_id] lowercase = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def A__ ( self ,A__ ,A__ = None): lowercase = self._tokenizer.model.save(A__ ,name=A__) return tuple(A__)
101
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar _snake_case = TypeVar('_T') class UpperCamelCase ( Generic[_T] ): def __init__( self : Optional[int] , UpperCAmelCase__ : Iterable[_T] | None = None ) -> None: _a : list[_T] = list(iterable or [] ) _a : list[_T] = [] def __len__( self : str ) -> int: return len(self._stacka ) + len(self._stacka ) def __repr__( self : List[str] ) -> str: return f"""Queue({tuple(self._stacka[::-1] + self._stacka )})""" def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : _T ) -> None: self._stacka.append(UpperCAmelCase__ ) def _lowercase ( self : Optional[Any] ) -> _T: _a : Any = self._stacka.pop _a : Union[str, Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError("""Queue is empty""" ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
294
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) SCREAMING_SNAKE_CASE : Tuple = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : Dict = ["""BeitFeatureExtractor"""] SCREAMING_SNAKE_CASE : Optional[Any] = ["""BeitImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ """BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """BeitForImageClassification""", """BeitForMaskedImageModeling""", """BeitForSemanticSegmentation""", """BeitModel""", """BeitPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE : int = [ """FlaxBeitForImageClassification""", """FlaxBeitForMaskedImageModeling""", """FlaxBeitModel""", """FlaxBeitPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
102
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _snake_case = False class UpperCamelCase ( unittest.TestCase ): pass @nightly @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Optional[Any] ) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Tuple ) -> List[Any]: _a : Tuple = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : List[str] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Optional[Any] = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(UpperCAmelCase__ ) _a : Dict = VersatileDiffusionPipeline.from_pretrained(UpperCAmelCase__ , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : Optional[Any] = generator.manual_seed(0 ) _a : str = pipe.dual_guided( prompt="""first prompt""" , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Optional[int] = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa ) pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) _a : int = """cyberpunk 2077""" _a : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" ) _a : Tuple = torch.manual_seed(0 ) _a : Any = pipe.dual_guided( prompt=UpperCAmelCase__ , image=UpperCAmelCase__ , text_to_image_strength=0.7_5 , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images _a : List[str] = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[int] = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : int = """A painting of a squirrel eating a burger """ _a : Tuple = torch.manual_seed(0 ) _a : Union[str, Any] = pipe.text_to_image( prompt=UpperCAmelCase__ , generator=UpperCAmelCase__ , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images _a : int = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : int = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _a : str = pipe.image_variation(UpperCAmelCase__ , generator=UpperCAmelCase__ , output_type="""numpy""" ).images _a : str = image[0, 253:256, 253:256, -1] assert image.shape == (1, 512, 512, 3) _a : Optional[Any] = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
294
0
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class __snake_case ( unittest.TestCase ): _a = JukeboxTokenizer _a = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def UpperCAmelCase__ ( self : str): import torch lowerCAmelCase_ : List[Any] = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') lowerCAmelCase_ : Any = tokenizer(**self.metas)['''input_ids'''] # fmt: off lowerCAmelCase_ : List[str] = [ torch.tensor([[ 0, 0, 0, 7_1_6_9, 5_0_7, 9, 7_6, 3_9, 3_1, 4_6, 7_6, 2_7, 7_6, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_6, 3_2, 4_4, 4_1, 3_9, 7_6, 2_7, 4_0, 7_6, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_6, 3_8, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 4_1, 7_6, 4_5, 2_7, 3_5, 3_0, 7_6, 7_1, 2_0, 4_9, 4_1, 7_6, 4_8, 2_7, 4_5, 4_6, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_6, 3_8, 3_1, 3_3, 4_5, 7_6, 4_1, 3_2, 7_6, 4_5, 4_6, 4_1, 4_0, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_9, 4_6, 2_7, 4_0, 3_0, 7_6, 3_5, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 6_3, 7_6, 1_4, 3_1, 2_7, 4_4, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_5, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 8, 2_7, 3_8, 3_2, 7_6, 4_5, 4_7, 4_0, 3_7, 7_6, 2_7, 7_6, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_6, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_6, 3_8, 3_5, 3_1, 4_5, 6_4, 7_6, 4_9, 3_4, 4_1, 4_5, 3_1, 7_6, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_6, 3_8, 3_5, 4_2, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_5, 4_0, 3_1, 3_1, 4_4, 7_6, 4_1, 3_2, 7_6, 2_9, 4_1, 3_8, 3_0, 7_6, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_5, 4_6, 4_5, 7_6, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_6, 4_9, 3_1, 3_8, 3_8, 7_6, 4_6, 3_4, 4_1, 4_5, 3_1, 7_6, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_6, 4_4, 3_1, 2_7, 3_0, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_3, 3_4, 3_5, 2_9, 3_4, 7_6, 5_1, 3_1, 4_6, 7_6, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_6, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_6, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_4, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_6, 4_6, 3_4, 3_1, 3_9, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_4, 3_1, 2_7, 4_4, 4_6, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 3_2, 3_1, 3_0, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1, 4_0, 3_0, 7_6, 4_1, 4_0, 7_6, 4_6, 3_4, 3_1, 7_6, 4_2, 3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_6, 4_6, 3_4, 3_1, 4_5, 3_1, 7_6, 4_9, 4_1, 4_4, 3_0, 4_5, 7_6, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_3, 5_1, 7_6, 4_0, 2_7, 3_9, 3_1, 7_6, 3_5, 4_5, 7_6, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_6, 1_1, 3_5, 4_0, 3_3, 7_6, 4_1, 3_2, 7_6, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_2, 4_1, 4_1, 3_7, 7_6, 4_1, 4_0, 7_6, 3_9, 5_1, 7_6, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_6, 5_1, 3_1, 7_6, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_6, 2_7, 4_0, 3_0, 7_6, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_6, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_6, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_6, 1_8, 4_1, 4_7, 4_0, 3_0, 7_6, 4_6, 3_4, 3_1, 7_6, 3_0, 3_1, 2_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 1_5, 3_2, 7_6, 4_6, 3_4, 2_7, 4_6, 7_6, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_6, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_6, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_6, 2_7, 4_0, 3_0, 7_6, 2_8, 2_7, 4_4, 3_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 2_0, 3_4, 3_1, 7_6, 3_8, 4_1, 4_0, 3_1, 7_6, 2_7, 4_0, 3_0, 7_6, 3_8, 3_1, 4_8, 3_1, 3_8, 7_6, 4_5, 2_7, 4_0, 3_0, 4_5, 7_6, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_6, 3_2, 2_7, 4_4, 7_6, 2_7, 4_9, 2_7, 5_1, 7_8, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6, 7_6]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def UpperCAmelCase__ ( self : Any): import torch lowerCAmelCase_ : str = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') lowerCAmelCase_ : List[Any] = tokenizer(**self.metas)['''input_ids'''] # fmt: off lowerCAmelCase_ : Optional[int] = [ torch.tensor([[ 0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1, 9, 7_7, 3_9, 3_1, 4_6, 7_7, 2_7, 7_7, 4_6, 4_4, 2_7, 4_8, 3_1, 3_8, 3_8, 3_1, 4_4, 7_7, 3_2, 4_4, 4_1, 3_9, 7_7, 2_7, 4_0, 7_7, 2_7, 4_0, 4_6, 3_5, 4_3, 4_7, 3_1, 7_7, 3_8, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 4_1, 7_7, 4_5, 2_7, 3_5, 3_0, 7_7, 7_2, 2_0, 4_9, 4_1, 7_7, 4_8, 2_7, 4_5, 4_6, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 4_4, 4_7, 4_0, 3_7, 3_8, 3_1, 4_5, 4_5, 7_7, 3_8, 3_1, 3_3, 4_5, 7_7, 4_1, 3_2, 7_7, 4_5, 4_6, 4_1, 4_0, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_9, 4_6, 2_7, 4_0, 3_0, 7_7, 3_5, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 4_5, 3_1, 4_4, 4_6, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 6_3, 7_7, 1_4, 3_1, 2_7, 4_4, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_5, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 8, 2_7, 3_8, 3_2, 7_7, 4_5, 4_7, 4_0, 3_7, 7_7, 2_7, 7_7, 4_5, 3_4, 2_7, 4_6, 4_6, 3_1, 4_4, 3_1, 3_0, 7_7, 4_8, 3_5, 4_5, 2_7, 3_3, 3_1, 7_7, 3_8, 3_5, 3_1, 4_5, 6_4, 7_7, 4_9, 3_4, 4_1, 4_5, 3_1, 7_7, 3_2, 4_4, 4_1, 4_9, 4_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_9, 4_4, 3_5, 4_0, 3_7, 3_8, 3_1, 3_0, 7_7, 3_8, 3_5, 4_2, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_5, 4_0, 3_1, 3_1, 4_4, 7_7, 4_1, 3_2, 7_7, 2_9, 4_1, 3_8, 3_0, 7_7, 2_9, 4_1, 3_9, 3_9, 2_7, 4_0, 3_0, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_5, 4_6, 4_5, 7_7, 4_5, 2_9, 4_7, 3_8, 4_2, 4_6, 4_1, 4_4, 7_7, 4_9, 3_1, 3_8, 3_8, 7_7, 4_6, 3_4, 4_1, 4_5, 3_1, 7_7, 4_2, 2_7, 4_5, 4_5, 3_5, 4_1, 4_0, 4_5, 7_7, 4_4, 3_1, 2_7, 3_0, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_3, 3_4, 3_5, 2_9, 3_4, 7_7, 5_1, 3_1, 4_6, 7_7, 4_5, 4_7, 4_4, 4_8, 3_5, 4_8, 3_1, 6_4, 7_7, 4_5, 4_6, 2_7, 3_9, 4_2, 3_1, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 3_8, 3_5, 3_2, 3_1, 3_8, 3_1, 4_5, 4_5, 7_7, 4_6, 3_4, 3_5, 4_0, 3_3, 4_5, 6_4, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_4, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_9, 4_1, 2_9, 3_7, 3_1, 3_0, 7_7, 4_6, 3_4, 3_1, 3_9, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_4, 3_1, 2_7, 4_4, 4_6, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 3_2, 3_1, 3_0, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1, 4_0, 3_0, 7_7, 4_1, 4_0, 7_7, 4_6, 3_4, 3_1, 7_7, 4_2, 3_1, 3_0, 3_1, 4_5, 4_6, 2_7, 3_8, 6_4, 7_7, 4_6, 3_4, 3_1, 4_5, 3_1, 7_7, 4_9, 4_1, 4_4, 3_0, 4_5, 7_7, 2_7, 4_2, 4_2, 3_1, 2_7, 4_4, 6_5, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_3, 5_1, 7_7, 4_0, 2_7, 3_9, 3_1, 7_7, 3_5, 4_5, 7_7, 1_5, 5_2, 5_1, 3_9, 2_7, 4_0, 3_0, 3_5, 2_7, 4_5, 6_4, 7_7, 1_1, 3_5, 4_0, 3_3, 7_7, 4_1, 3_2, 7_7, 1_1, 3_5, 4_0, 3_3, 4_5, 6_6, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_2, 4_1, 4_1, 3_7, 7_7, 4_1, 4_0, 7_7, 3_9, 5_1, 7_7, 2_3, 4_1, 4_4, 3_7, 4_5, 6_4, 7_7, 5_1, 3_1, 7_7, 1_3, 3_5, 3_3, 3_4, 4_6, 5_1, 6_4, 7_7, 2_7, 4_0, 3_0, 7_7, 3_0, 3_1, 4_5, 4_2, 2_7, 3_5, 4_4, 6_7, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_4, 4_1, 4_6, 3_4, 3_5, 4_0, 3_3, 7_7, 2_8, 3_1, 4_5, 3_5, 3_0, 3_1, 7_7, 4_4, 3_1, 3_9, 2_7, 3_5, 4_0, 4_5, 6_3, 7_7, 1_8, 4_1, 4_7, 4_0, 3_0, 7_7, 4_6, 3_4, 3_1, 7_7, 3_0, 3_1, 2_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 1_5, 3_2, 7_7, 4_6, 3_4, 2_7, 4_6, 7_7, 2_9, 4_1, 3_8, 4_1, 4_5, 4_5, 2_7, 3_8, 7_7, 2_3, 4_4, 3_1, 2_9, 3_7, 6_4, 7_7, 2_8, 4_1, 4_7, 4_0, 3_0, 3_8, 3_1, 4_5, 4_5, 7_7, 2_7, 4_0, 3_0, 7_7, 2_8, 2_7, 4_4, 3_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 2_0, 3_4, 3_1, 7_7, 3_8, 4_1, 4_0, 3_1, 7_7, 2_7, 4_0, 3_0, 7_7, 3_8, 3_1, 4_8, 3_1, 3_8, 7_7, 4_5, 2_7, 4_0, 3_0, 4_5, 7_7, 4_5, 4_6, 4_4, 3_1, 4_6, 2_9, 3_4, 7_7, 3_2, 2_7, 4_4, 7_7, 2_7, 4_9, 2_7, 5_1, 7_9, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7, 7_7]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_0_6_9, 1_1, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
103
"""simple docstring""" import json import os from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'vocab.json'} _snake_case = { 'vocab_file': { 'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json', } } _snake_case = {'mgp-str': 27} class UpperCamelCase ( snake_case_ ): UpperCamelCase : List[str] = VOCAB_FILES_NAMES UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]="[GO]" , UpperCAmelCase__ : Tuple="[GO]" , UpperCAmelCase__ : Optional[int]="[s]" , UpperCAmelCase__ : int="[GO]" , **UpperCAmelCase__ : Dict ) -> int: super().__init__( unk_token=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , **UpperCAmelCase__ , ) with open(UpperCAmelCase__ , encoding="""utf-8""" ) as vocab_handle: _a : int = json.load(UpperCAmelCase__ ) _a : Optional[int] = {v: k for k, v in self.vocab.items()} @property def _lowercase ( self : Dict ) -> Union[str, Any]: return len(self.vocab ) def _lowercase ( self : Union[str, Any] ) -> str: return dict(self.vocab , **self.added_tokens_encoder ) def _lowercase ( self : Dict , UpperCAmelCase__ : str ) -> Union[str, Any]: _a : Tuple = [] for s in text: char_tokens.extend(UpperCAmelCase__ ) return char_tokens def _lowercase ( self : List[Any] , UpperCAmelCase__ : str ) -> Dict: return self.vocab.get(UpperCAmelCase__ , self.vocab.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : Tuple ) -> List[Any]: return self.decoder.get(UpperCAmelCase__ ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCAmelCase__ ) ) return _a : Tuple = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase__ , ensure_ascii=UpperCAmelCase__ ) + """\n""" ) return (vocab_file,)
294
0
'''simple docstring''' import logging import re import pytorch_quantization import pytorch_quantization.nn as quant_nn import torch from pytorch_quantization import calib from pytorch_quantization.tensor_quant import QuantDescriptor lowerCAmelCase__ = logging.getLogger(__name__) lowerCAmelCase__ = 50 # max width of layer names lowerCAmelCase__ = 70 # max width of quantizer names def _A ( A__ ): """simple docstring""" __lowercase = parser.add_argument_group('''quant_trainer arguments''' ) group.add_argument('''--wprec''' , type=A__ , default=8 , help='''weight precision''' ) group.add_argument('''--aprec''' , type=A__ , default=8 , help='''activation precision''' ) group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' ) group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' ) group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' ) group.add_argument('''--quant-disable-keyword''' , type=A__ , nargs='''+''' , help='''disable quantizers by keyword''' ) group.add_argument('''--quant-disable-layer-module''' , type=A__ , help='''disable quantizers by keyword under layer.''' ) group.add_argument('''--quant-enable-layer-module''' , type=A__ , help='''enable quantizers by keyword under layer''' ) group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' ) group.add_argument('''--percentile''' , default=A__ , type=A__ , help='''percentile for PercentileCalibrator''' ) group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' ) group.add_argument('''--clip-gelu''' , metavar='''N''' , type=A__ , help='''clip gelu output maximum value to N''' ) group.add_argument( '''--recalibrate-weights''' , action='''store_true''' , help=( '''recalibrate weight amaxes by taking the max of the weights.''' ''' amaxes will be computed with the current quantization granularity (axis).''' ) , ) def _A ( A__ ): """simple docstring""" if args.calibrator == "max": __lowercase = '''max''' elif args.calibrator == "percentile": if args.percentile is None: raise ValueError('''Specify --percentile when using percentile calibrator''' ) __lowercase = '''histogram''' elif args.calibrator == "mse": __lowercase = '''histogram''' else: raise ValueError(F"Invalid calibrator {args.calibrator}" ) __lowercase = QuantDescriptor(num_bits=args.aprec , calib_method=A__ ) __lowercase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) ) quant_nn.QuantLinear.set_default_quant_desc_input(A__ ) quant_nn.QuantLinear.set_default_quant_desc_weight(A__ ) def _A ( A__ , A__ , A__=False , A__=False ): """simple docstring""" logger.info('''Configuring Model for Quantization''' ) logger.info(F"using quantization package {pytorch_quantization.__file__}" ) if not calib: if args.quant_disable_embeddings: set_quantizer_by_name(A__ , ['''embeddings'''] , which='''weight''' , _disabled=A__ ) if args.quant_disable: set_quantizer_by_name(A__ , [''''''] , _disabled=A__ ) if args.quant_disable_keyword: set_quantizer_by_name(A__ , args.quant_disable_keyword , _disabled=A__ ) if args.quant_disable_layer_module: set_quantizer_by_name(A__ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=A__ ) if args.quant_enable_layer_module: set_quantizer_by_name(A__ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=A__ ) if args.recalibrate_weights: recalibrate_weights(A__ ) if args.fuse_qkv: fuse_qkv(A__ , A__ ) if args.clip_gelu: clip_gelu(A__ , args.clip_gelu ) # if args.local_rank in [-1, 0] and not calib: print_quant_summary(A__ ) def _A ( A__ ): """simple docstring""" logger.info('''Enabling Calibration''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: module.disable_quant() module.enable_calib() else: module.disable() logger.info(F"{name:80}: {module}" ) def _A ( A__ , A__ ): """simple docstring""" logger.info('''Loading calibrated amax''' ) for name, module in model.named_modules(): if name.endswith('''_quantizer''' ): if module._calibrator is not None: if isinstance(module._calibrator , calib.MaxCalibrator ): module.load_calib_amax() else: module.load_calib_amax('''percentile''' , percentile=args.percentile ) module.enable_quant() module.disable_calib() else: module.enable() model.cuda() print_quant_summary(A__ ) def _A ( A__ , A__ ): """simple docstring""" def fusea(A__ , A__ , A__ ): for mod in [qq, qk, qv]: if not hasattr(A__ , '''_amax''' ): print(''' WARNING: NO AMAX BUFFER''' ) return __lowercase = qq._amax.detach().item() __lowercase = qk._amax.detach().item() __lowercase = qv._amax.detach().item() __lowercase = max(A__ , A__ , A__ ) qq._amax.fill_(A__ ) qk._amax.fill_(A__ ) qv._amax.fill_(A__ ) logger.info(F" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}" ) for name, mod in model.named_modules(): if name.endswith('''.attention.self''' ): logger.info(F"FUSE_QKV: {name:{name_width}}" ) fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer ) if args.quant_per_tensor: fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer ) def _A ( A__ , A__ ): """simple docstring""" for name, mod in model.named_modules(): if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ): __lowercase = mod._input_quantizer._amax.data.detach().item() mod._input_quantizer._amax.data.detach().clamp_(max=A__ ) __lowercase = mod._input_quantizer._amax.data.detach().item() logger.info(F"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}" ) def _A ( A__ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(A__ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None: __lowercase = mod.weight.shape[0] __lowercase = mod._weight_quantizer._amax.detach() __lowercase = torch.ones(A__ , dtype=amax.dtype , device=amax.device ) * amax print(F"expanding {name} {amax} -> {mod._weight_quantizer._amax}" ) def _A ( A__ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(A__ , '''_weight_quantizer''' ): if not hasattr(mod.weight_quantizer , '''_amax''' ): print('''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' ) continue # determine which axes to reduce across # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3) __lowercase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis ) __lowercase = set(range(len(mod.weight.size() ) ) ) - axis_set __lowercase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=A__ , keepdims=A__ ).detach() logger.info(F"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}" ) __lowercase = amax def _A ( A__ , A__=25 , A__=180 , A__=None ): """simple docstring""" if ignore is None: __lowercase = [] elif not isinstance(A__ , A__ ): __lowercase = [ignore] __lowercase = 0 for name, mod in model.named_modules(): if not hasattr(A__ , '''weight''' ): continue __lowercase = max(A__ , len(A__ ) ) for name, mod in model.named_modules(): __lowercase = getattr(A__ , '''_input_quantizer''' , A__ ) __lowercase = getattr(A__ , '''_weight_quantizer''' , A__ ) if not hasattr(A__ , '''weight''' ): continue if type(A__ ) in ignore: continue if [True for s in ignore if type(A__ ) is str and s in name]: continue __lowercase = F"Act:{input_q.extra_repr()}" __lowercase = F"Wgt:{weight_q.extra_repr()}" __lowercase = F"{name:{name_width}} {act_str} {wgt_str}" if len(A__ ) <= line_width: logger.info(A__ ) else: logger.info(F"{name:{name_width}} {act_str}" ) logger.info(F"{' ':{name_width}} {wgt_str}" ) def _A ( A__ ): """simple docstring""" __lowercase = 0 for name, mod in model.named_modules(): if isinstance(A__ , pytorch_quantization.nn.TensorQuantizer ): print(F"{name:80} {mod}" ) count += 1 print(F"{count} TensorQuantizers found in model" ) def _A ( A__ , A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = getattr(A__ , A__ , A__ ) if quantizer_mod is not None: assert hasattr(A__ , A__ ) setattr(A__ , A__ , A__ ) else: logger.warning(F"{name} has no {quantizer}" ) def _A ( A__ , A__ , A__="both" , **A__ ): """simple docstring""" __lowercase = F"Warning: changing {which} quantizers of {name:{qname_width}}" for k, v in kwargs.items(): s += F" {k}={v}" if which in ["input", "both"]: set_quantizer(A__ , A__ , '''_input_quantizer''' , A__ , A__ ) if which in ["weight", "both"]: set_quantizer(A__ , A__ , '''_weight_quantizer''' , A__ , A__ ) logger.info(A__ ) def _A ( A__ , A__ , **A__ ): """simple docstring""" for name, mod in model.named_modules(): if hasattr(A__ , '''_input_quantizer''' ) or hasattr(A__ , '''_weight_quantizer''' ): for n in names: if re.search(A__ , A__ ): set_quantizers(A__ , A__ , **A__ ) elif name.endswith('''_quantizer''' ): for n in names: if re.search(A__ , A__ ): __lowercase = F"Warning: changing {name:{name_width}}" for k, v in kwargs.items(): s += F" {k}={v}" setattr(A__ , A__ , A__ ) logger.info(A__ )
104
"""simple docstring""" import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = (IPNDMScheduler,) UpperCamelCase : int = (('''num_inference_steps''', 50),) def _lowercase ( self : Union[str, Any] , **UpperCAmelCase__ : Tuple ) -> int: _a : Optional[int] = {"""num_train_timesteps""": 1000} config.update(**UpperCAmelCase__ ) return config def _lowercase ( self : Dict , UpperCAmelCase__ : Any=0 , **UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]: _a : Optional[int] = dict(self.forward_default_kwargs ) _a : Dict = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : Union[str, Any] = 0.1 * sample _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Any = dummy_past_residuals[:] if time_step is None: _a : str = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class.from_pretrained(UpperCAmelCase__ ) new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals _a : Optional[Any] = dummy_past_residuals[:] _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : str = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : Tuple ) -> List[str]: pass def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Optional[Any] ) -> List[Any]: _a : Optional[Any] = dict(self.forward_default_kwargs ) _a : Optional[Any] = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) _a : Optional[Any] = self.dummy_sample _a : List[Any] = 0.1 * sample _a : Optional[Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: _a : Union[str, Any] = self.get_scheduler_config() _a : Optional[Any] = scheduler_class(**UpperCAmelCase__ ) scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residuals (must be after setting timesteps) _a : Any = dummy_past_residuals[:] if time_step is None: _a : List[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCAmelCase__ ) _a : Any = scheduler_class.from_pretrained(UpperCAmelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCAmelCase__ ) # copy over dummy past residual (must be after setting timesteps) _a : Optional[Any] = dummy_past_residuals[:] _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Tuple = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : int = new_scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self : str , **UpperCAmelCase__ : Any ) -> List[str]: _a : Optional[int] = self.scheduler_classes[0] _a : Optional[Any] = self.get_scheduler_config(**UpperCAmelCase__ ) _a : Union[str, Any] = scheduler_class(**UpperCAmelCase__ ) _a : int = 10 _a : List[Any] = self.dummy_model() _a : str = self.dummy_sample_deter scheduler.set_timesteps(UpperCAmelCase__ ) for i, t in enumerate(scheduler.timesteps ): _a : str = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): _a : Union[str, Any] = model(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Any = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ).prev_sample return sample def _lowercase ( self : int ) -> str: _a : Dict = dict(self.forward_default_kwargs ) _a : int = kwargs.pop("""num_inference_steps""" , UpperCAmelCase__ ) for scheduler_class in self.scheduler_classes: _a : Optional[int] = self.get_scheduler_config() _a : Tuple = scheduler_class(**UpperCAmelCase__ ) _a : Tuple = self.dummy_sample _a : Optional[Any] = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCAmelCase__ , """set_timesteps""" ): scheduler.set_timesteps(UpperCAmelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCAmelCase__ , """set_timesteps""" ): _a : List[str] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) _a : Union[str, Any] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] _a : Optional[Any] = dummy_past_residuals[:] _a : Optional[Any] = scheduler.timesteps[5] _a : str = scheduler.timesteps[6] _a : Optional[int] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : Union[str, Any] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) _a : Tuple = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample _a : List[str] = scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowercase ( self : List[str] ) -> List[str]: for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> List[str]: for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCAmelCase__ , time_step=UpperCAmelCase__ ) def _lowercase ( self : int ) -> List[Any]: _a : str = self.full_loop() _a : List[Any] = torch.mean(torch.abs(UpperCAmelCase__ ) ) assert abs(result_mean.item() - 2540529 ) < 10
294
0
"""simple docstring""" import argparse import copy def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] ) ->int: '''simple docstring''' a : int = {} with open(_lowercase ) as f: for line in f: if line.split()[0] not in dict_of_neighbours: a : Union[str, Any] = [] _list.append([line.split()[1], line.split()[2]] ) a : Dict = _list else: dict_of_neighbours[line.split()[0]].append( [line.split()[1], line.split()[2]] ) if line.split()[1] not in dict_of_neighbours: a : Optional[int] = [] _list.append([line.split()[0], line.split()[2]] ) a : Dict = _list else: dict_of_neighbours[line.split()[1]].append( [line.split()[0], line.split()[2]] ) return dict_of_neighbours def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : List[str] ) ->Optional[int]: '''simple docstring''' with open(_lowercase ) as f: a : Union[str, Any] = f.read(1 ) a : Optional[Any] = start_node a : List[str] = [] a : Union[str, Any] = start_node a : int = 0 while visiting not in first_solution: a : Optional[int] = 1_0000 for k in dict_of_neighbours[visiting]: if int(k[1] ) < int(_lowercase ) and k[0] not in first_solution: a : List[str] = k[1] a : Optional[int] = k[0] first_solution.append(_lowercase ) a : Dict = distance_of_first_solution + int(_lowercase ) a : Tuple = best_node first_solution.append(_lowercase ) a : Dict = 0 for k in dict_of_neighbours[first_solution[-2]]: if k[0] == start_node: break position += 1 a : Union[str, Any] = ( distance_of_first_solution + int(dict_of_neighbours[first_solution[-2]][position][1] ) - 1_0000 ) return first_solution, distance_of_first_solution def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : List[str] ) ->Dict: '''simple docstring''' a : int = [] for n in solution[1:-1]: a : int = solution.index(_lowercase ) for kn in solution[1:-1]: a : Optional[Any] = solution.index(_lowercase ) if n == kn: continue a : str = copy.deepcopy(_lowercase ) a : Any = kn a : Tuple = n a : Optional[int] = 0 for k in _tmp[:-1]: a : int = _tmp[_tmp.index(_lowercase ) + 1] for i in dict_of_neighbours[k]: if i[0] == next_node: a : Optional[Any] = distance + int(i[1] ) _tmp.append(_lowercase ) if _tmp not in neighborhood_of_solution: neighborhood_of_solution.append(_tmp ) a : List[str] = len(neighborhood_of_solution[0] ) - 1 neighborhood_of_solution.sort(key=lambda _lowercase : x[index_of_last_item_in_the_list] ) return neighborhood_of_solution def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : str , _lowercase : str ) ->List[str]: '''simple docstring''' a : Optional[int] = 1 a : int = first_solution a : str = [] a : Any = distance_of_first_solution a : List[str] = solution while count <= iters: a : List[Any] = find_neighborhood(_lowercase , _lowercase ) a : Union[str, Any] = 0 a : List[Any] = neighborhood[index_of_best_solution] a : Dict = len(_lowercase ) - 1 a : Optional[Any] = False while not found: a : str = 0 while i < len(_lowercase ): if best_solution[i] != solution[i]: a : str = best_solution[i] a : int = solution[i] break a : Union[str, Any] = i + 1 if [first_exchange_node, second_exchange_node] not in tabu_list and [ second_exchange_node, first_exchange_node, ] not in tabu_list: tabu_list.append([first_exchange_node, second_exchange_node] ) a : Optional[Any] = True a : Optional[Any] = best_solution[:-1] a : Union[str, Any] = neighborhood[index_of_best_solution][best_cost_index] if cost < best_cost: a : Any = cost a : List[str] = solution else: a : int = index_of_best_solution + 1 a : Any = neighborhood[index_of_best_solution] if len(_lowercase ) >= size: tabu_list.pop(0 ) a : Any = count + 1 return best_solution_ever, best_cost def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple=None ) ->Union[str, Any]: '''simple docstring''' a : List[Any] = generate_neighbours(args.File ) a, a : Union[str, Any] = generate_first_solution( args.File , _lowercase ) a, a : Optional[Any] = tabu_search( _lowercase , _lowercase , _lowercase , args.Iterations , args.Size , ) print(F"""Best solution: {best_sol}, with total distance: {best_cost}.""" ) if __name__ == "__main__": a : Optional[int] = argparse.ArgumentParser(description='''Tabu Search''') parser.add_argument( '''-f''', '''--File''', type=str, help='''Path to the file containing the data''', required=True, ) parser.add_argument( '''-i''', '''--Iterations''', type=int, help='''How many iterations the algorithm should perform''', required=True, ) parser.add_argument( '''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True ) # Pass the arguments to main method main(parser.parse_args())
105
"""simple docstring""" import os import unicodedata from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import SPIECE_UNDERLINE, logging _snake_case = logging.get_logger(__name__) _snake_case = {'vocab_file': 'spiece.model'} _snake_case = { 'vocab_file': { 'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model', } } class UpperCamelCase ( snake_case_ ): def __init__( self : Tuple , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : int=False , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Any="</s>" , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : int="<sep>" , UpperCAmelCase__ : Tuple="<pad>" , UpperCAmelCase__ : Any="<cls>" , UpperCAmelCase__ : Optional[Any]="<mask>" , UpperCAmelCase__ : int=["<eop>", "<eod>"] , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : List[str] , ) -> None: _a : Optional[int] = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token _a : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=UpperCAmelCase__ , remove_space=UpperCAmelCase__ , keep_accents=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , ) _a : Optional[Any] = 3 _a : Tuple = do_lower_case _a : Tuple = remove_space _a : Tuple = keep_accents _a : Tuple = vocab_file _a : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCAmelCase__ ) try: import jieba except ModuleNotFoundError as error: raise error.__class__( """You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """ """See https://pypi.org/project/jieba/ for installation.""" ) _a : int = jieba _a : Tuple = str.maketrans(""" \n""" , """\u2582\u2583""" ) @property # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size def _lowercase ( self : Optional[Any] ) -> Any: return len(self.sp_model ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : Tuple ) -> List[str]: _a : Tuple = self.__dict__.copy() _a : Tuple = None return state def __setstate__( self : Any , UpperCAmelCase__ : Dict ) -> Dict: _a : Tuple = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): _a : Tuple = {} _a : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) -> Dict: if self.remove_space: _a : Optional[int] = """ """.join(inputs.strip().split() ) else: _a : List[Any] = inputs _a : int = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" ) if not self.keep_accents: _a : Optional[Any] = unicodedata.normalize("""NFKD""" , UpperCAmelCase__ ) _a : Dict = """""".join([c for c in outputs if not unicodedata.combining(UpperCAmelCase__ )] ) if self.do_lower_case: _a : Union[str, Any] = outputs.lower() return outputs def _lowercase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]: _a : str = self.preprocess_text(UpperCAmelCase__ ) _a : Dict = self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ ) _a : Union[str, Any] = [] for piece in pieces: if len(UpperCAmelCase__ ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit(): _a : Dict = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase__ , """""" ) ) if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE: if len(cur_pieces[0] ) == 1: _a : Dict = cur_pieces[1:] else: _a : Any = cur_pieces[0][1:] cur_pieces.append(piece[-1] ) new_pieces.extend(UpperCAmelCase__ ) else: new_pieces.append(UpperCAmelCase__ ) return new_pieces def _lowercase ( self : Union[str, Any] , UpperCAmelCase__ : int ) -> int: return self.sp_model.PieceToId(UpperCAmelCase__ ) def _lowercase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] ) -> Any: return self.sp_model.IdToPiece(UpperCAmelCase__ ) def _lowercase ( self : Any , UpperCAmelCase__ : Any ) -> Dict: _a : Dict = """""".join(UpperCAmelCase__ ).replace(UpperCAmelCase__ , """ """ ).strip() return out_string def _lowercase ( self : Any , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Optional[Any] = [self.sep_token_id] _a : Dict = [self.cls_token_id] if token_ids_a is None: return token_ids_a + sep + cls return token_ids_a + sep + token_ids_a + sep + cls def _lowercase ( self : Tuple , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ ) if token_ids_a is not None: return ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] return ([0] * len(UpperCAmelCase__ )) + [1, 1] def _lowercase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]: _a : Any = [self.sep_token_id] _a : Optional[Any] = [2] if token_ids_a is None: return len(token_ids_a + sep ) * [0] + cls_segment_id return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id def _lowercase ( self : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]: if not os.path.isdir(UpperCAmelCase__ ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return _a : Union[str, Any] = os.path.join( UpperCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase__ , """wb""" ) as fi: _a : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase__ ) return (out_vocab_file,) def _lowercase ( self : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : List[str] ) -> List[str]: _a : Tuple = super()._decode(*UpperCAmelCase__ , **UpperCAmelCase__ ) _a : Optional[Any] = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" ) return text
294
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : int = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = ['''MaskFormerFeatureExtractor'''] __UpperCamelCase : Dict = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] __UpperCamelCase : Optional[Any] = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
106
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class UpperCamelCase ( nn.Module ): def __init__( self : Union[str, Any] ) -> int: super().__init__() _a : Optional[Any] = nn.Linear(3 , 4 ) _a : Tuple = nn.BatchNormad(4 ) _a : Dict = nn.Linear(4 , 5 ) def _lowercase ( self : Optional[int] , UpperCAmelCase__ : List[str] ) -> int: return self.lineara(self.batchnorm(self.lineara(UpperCAmelCase__ ) ) ) class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Any , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Optional[int] ) -> Optional[int]: return (args[0] + 1,) + args[1:], kwargs class UpperCamelCase ( snake_case_ ): def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]: return output + 1 class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Dict ) -> str: _a : List[Any] = ModelForTest() _a : str = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(test_model._hf_hook , UpperCAmelCase__ ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Optional[int] ) -> Optional[int]: _a : Dict = ModelForTest() _a : Dict = ModelHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ , append=UpperCAmelCase__ ) self.assertEqual(isinstance(test_model._hf_hook , UpperCAmelCase__ ) , UpperCAmelCase__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , """forward""" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["""x"""] ) remove_hook_from_module(UpperCAmelCase__ ) self.assertFalse(hasattr(UpperCAmelCase__ , """_hf_hook""" ) ) self.assertFalse(hasattr(UpperCAmelCase__ , """_old_forward""" ) ) def _lowercase ( self : Dict ) -> int: _a : str = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Optional[Any] = test_model(x + 1 ) _a : str = test_model(x + 2 ) _a : Union[str, Any] = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : int = PreForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : str = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : int = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Tuple = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-5 ) def _lowercase ( self : Tuple ) -> int: _a : Tuple = ModelForTest() _a : Union[str, Any] = torch.randn(2 , 3 ) _a : Optional[int] = test_model(UpperCAmelCase__ ) _a : int = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _a : List[Any] = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Dict = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _a : Any = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : Optional[int] = test_model(UpperCAmelCase__ ) assert torch.allclose(UpperCAmelCase__ , output + 2 , atol=1E-5 ) def _lowercase ( self : Dict ) -> Optional[Any]: _a : Any = ModelForTest() _a : List[Any] = torch.randn(2 , 3 ) _a : Dict = test_model(UpperCAmelCase__ ) _a : Any = PostForwardHook() add_hook_to_module(UpperCAmelCase__ , UpperCAmelCase__ ) _a : List[str] = test_model(UpperCAmelCase__ ) self.assertTrue(torch.allclose(UpperCAmelCase__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _a : Any = True _a : Union[str, Any] = test_model(UpperCAmelCase__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def _lowercase ( self : Optional[Any] ) -> str: _a : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _a : Optional[int] = torch.randn(2 , 3 ) _a : Any = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. add_hook_to_module(UpperCAmelCase__ , AlignDevicesHook(io_same_device=UpperCAmelCase__ ) ) _a : str = torch.randn(2 , 3 ).to(0 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , torch.device(0 ) ) def _lowercase ( self : str ) -> Union[str, Any]: _a : int = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : List[Any] = {"""execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True} add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(hook_kwargs["""execution_device"""] ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : int = torch.randn(2 , 3 ) _a : str = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload _a : List[str] = { """execution_device""": 0 if torch.cuda.is_available() else """cpu""", """offload""": True, """offload_buffers""": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**UpperCAmelCase__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**UpperCAmelCase__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Tuple = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Tuple ) -> List[str]: _a : str = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : Union[str, Any] = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Dict = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : List[Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook(UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , offload_buffers=UpperCAmelCase__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : List[str] = torch.randn(2 , 3 ) _a : Union[str, Any] = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) def _lowercase ( self : Dict ) -> str: _a : Optional[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # This will move each submodule on different devices _a : str = 0 if torch.cuda.is_available() else """cpu""" attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) # Buffers are not included in the offload by default, so are on the execution device _a : Union[str, Any] = torch.device(UpperCAmelCase__ ) self.assertEqual(model.batchnorm.running_mean.device , UpperCAmelCase__ ) _a : Union[str, Any] = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) # Now test with buffers included in the offload attach_align_device_hook( UpperCAmelCase__ , execution_device=UpperCAmelCase__ , offload=UpperCAmelCase__ , weights_map=model.state_dict() , offload_buffers=UpperCAmelCase__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""meta""" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("""meta""" ) ) _a : Any = torch.randn(2 , 3 ) _a : int = model(UpperCAmelCase__ ) self.assertEqual(output.device , UpperCAmelCase__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(UpperCAmelCase__ ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("""cpu""" ) ) self.assertEqual(model.lineara.weight.device , torch.device("""cpu""" ) )
294
0
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('4.31.0') __lowerCAmelCase : Any = logging.getLogger(__name__) @dataclass class snake_case__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) SCREAMING_SNAKE_CASE_ : bool = field( default=_UpperCamelCase , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , ) SCREAMING_SNAKE_CASE_ : str = field( default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , ) SCREAMING_SNAKE_CASE_ : bool = field( default=_UpperCamelCase , metadata={ """help""": ( """Will use the token generated when running `huggingface-cli login` (necessary to use this script """ """with private models).""" ) } , ) @dataclass class snake_case__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[str] = field(default=_UpperCamelCase , metadata={"""help""": """The input training data file (a text file)."""} ) SCREAMING_SNAKE_CASE_ : Optional[str] = field( default=_UpperCamelCase , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , ) SCREAMING_SNAKE_CASE_ : bool = field( default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=_UpperCamelCase , metadata={"""help""": """The number of processes to use for the preprocessing."""} , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=_UpperCamelCase , metadata={ """help""": ( """The maximum total input sequence length after tokenization. If passed, sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) SCREAMING_SNAKE_CASE_ : bool = field( default=_UpperCamelCase , metadata={ """help""": ( """Whether to pad all samples to the maximum sentence length. """ """If False, will pad the samples dynamically when batching to the maximum length in the batch. More """ """efficient on GPU but very bad for TPU.""" ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=_UpperCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of training examples to this """ """value if set.""" ) } , ) SCREAMING_SNAKE_CASE_ : Optional[int] = field( default=_UpperCamelCase , metadata={ """help""": ( """For debugging purposes or quicker training, truncate the number of evaluation examples to this """ """value if set.""" ) } , ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple: if self.train_file is not None: a = self.train_file.split("." )[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: a = self.validation_file.split("." )[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class snake_case__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : PreTrainedTokenizerBase SCREAMING_SNAKE_CASE_ : Union[bool, str, PaddingStrategy] = True SCREAMING_SNAKE_CASE_ : Optional[int] = None SCREAMING_SNAKE_CASE_ : Optional[int] = None def __call__( self : Dict , __lowerCamelCase : int ) -> List[Any]: a = "label" if "label" in features[0].keys() else "labels" a = [feature.pop(__lowerCamelCase ) for feature in features] a = len(__lowerCamelCase ) a = len(features[0]["input_ids"] ) a = [ [{k: v[i] for k, v in feature.items()} for i in range(__lowerCamelCase )] for feature in features ] a = list(chain(*__lowerCamelCase ) ) a = self.tokenizer.pad( __lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , ) # Un-flatten a = {k: v.view(__lowerCamelCase , __lowerCamelCase , -1 ) for k, v in batch.items()} # Add back labels a = torch.tensor(__lowerCamelCase , dtype=torch.intaa ) return batch def __magic_name__ ( ): '''simple docstring''' a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. a , a , a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: a , a , a = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", A, A ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout )], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() a = training_args.get_process_log_level() logger.setLevel(A ) datasets.utils.logging.set_verbosity(A ) transformers.utils.logging.set_verbosity(A ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. a = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: a = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: a = {} if data_args.train_file is not None: a = data_args.train_file if data_args.validation_file is not None: a = data_args.validation_file a = data_args.train_file.split("." )[-1] a = load_dataset( A, data_files=A, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. a = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. a = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) a = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) a = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path ), config=A, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. a = [F"""ending{i}""" for i in range(4 )] a = "sent1" a = "sent2" if data_args.max_seq_length is None: a = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value" " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can" " override this default with `--block_size xxx`." ) a = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( F"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the""" F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" ) a = min(data_args.max_seq_length, tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(A : Dict ): a = [[context] * 4 for context in examples[context_name]] a = examples[question_header_name] a = [ [F"""{header} {examples[end][i]}""" for end in ending_names] for i, header in enumerate(A ) ] # Flatten out a = list(chain(*A ) ) a = list(chain(*A ) ) # Tokenize a = tokenizer( A, A, truncation=A, max_length=A, padding="max_length" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(A ), 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset" ) a = raw_datasets["train"] if data_args.max_train_samples is not None: a = min(len(A ), data_args.max_train_samples ) a = train_dataset.select(range(A ) ) with training_args.main_process_first(desc="train dataset map pre-processing" ): a = train_dataset.map( A, batched=A, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset" ) a = raw_datasets["validation"] if data_args.max_eval_samples is not None: a = min(len(A ), data_args.max_eval_samples ) a = eval_dataset.select(range(A ) ) with training_args.main_process_first(desc="validation dataset map pre-processing" ): a = eval_dataset.map( A, batched=A, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator a = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=A, pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(A : Any ): a , a = eval_predictions a = np.argmax(A, axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer a = Trainer( model=A, args=A, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=A, data_collator=A, compute_metrics=A, ) # Training if training_args.do_train: a = None if training_args.resume_from_checkpoint is not None: a = training_args.resume_from_checkpoint elif last_checkpoint is not None: a = last_checkpoint a = trainer.train(resume_from_checkpoint=A ) trainer.save_model() # Saves the tokenizer too for easy upload a = train_result.metrics a = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(A ) ) a = min(A, len(A ) ) trainer.log_metrics("train", A ) trainer.save_metrics("train", A ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***" ) a = trainer.evaluate() a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(A ) a = min(A, len(A ) ) trainer.log_metrics("eval", A ) trainer.save_metrics("eval", A ) a = { "finetuned_from": model_args.model_name_or_path, "tasks": "multiple-choice", "dataset_tags": "swag", "dataset_args": "regular", "dataset": "SWAG", "language": "en", } if training_args.push_to_hub: trainer.push_to_hub(**A ) else: trainer.create_model_card(**A ) def __magic_name__ ( A : List[Any] ): '''simple docstring''' main() if __name__ == "__main__": main()
107
"""simple docstring""" from __future__ import annotations def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' print(F"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(UpperCamelCase__ ): print(F"""{i}\t\t{d}""" ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' for j in range(UpperCamelCase__ ): _a , _a , _a : List[str] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Dict = [float("""inf""" )] * vertex_count _a : Any = 0.0 for _ in range(vertex_count - 1 ): for j in range(UpperCamelCase__ ): _a , _a , _a : List[Any] = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: _a : Any = distance[u] + w _a : Union[str, Any] = check_negative_cycle(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() _snake_case = int(input('Enter number of vertices: ').strip()) _snake_case = int(input('Enter number of edges: ').strip()) _snake_case = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) _snake_case , _snake_case , _snake_case = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) _snake_case = {'src': src, 'dst': dest, 'weight': weight} _snake_case = int(input('\nEnter shortest path source:').strip()) _snake_case = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
294
0
"""simple docstring""" class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self , snake_case__ = "" , snake_case__ = False ): """simple docstring""" lowerCAmelCase : dict[str, RadixNode] = {} # A node will be a leaf if the tree contains its word lowerCAmelCase : str = is_leaf lowerCAmelCase : str = prefix def lowercase__ ( self , snake_case__ ): """simple docstring""" lowerCAmelCase : Dict = 0 for q, w in zip(self.prefix , snake_case__ ): if q != w: break x += 1 return self.prefix[:x], self.prefix[x:], word[x:] def lowercase__ ( self , snake_case__ ): """simple docstring""" for word in words: self.insert(snake_case__ ) def lowercase__ ( self , snake_case__ ): """simple docstring""" if self.prefix == word: lowerCAmelCase : Union[str, Any] = True # Case 2: The node has no edges that have a prefix to the word # Solution: We create an edge from the current node to a new one # containing the word elif word[0] not in self.nodes: lowerCAmelCase : Optional[Any] = RadixNode(prefix=snake_case__ , is_leaf=snake_case__ ) else: lowerCAmelCase : Tuple = self.nodes[word[0]] lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Optional[Any] = incoming_node.match( snake_case__ ) # Case 3: The node prefix is equal to the matching # Solution: We insert remaining word on the next node if remaining_prefix == "": self.nodes[matching_string[0]].insert(snake_case__ ) # Case 4: The word is greater equal to the matching # Solution: Create a node in between both nodes, change # prefixes and add the new node for the remaining word else: lowerCAmelCase : Optional[Any] = remaining_prefix lowerCAmelCase : int = self.nodes[matching_string[0]] lowerCAmelCase : List[Any] = RadixNode(snake_case__ , snake_case__ ) lowerCAmelCase : Optional[int] = aux_node if remaining_word == "": lowerCAmelCase : Optional[int] = True else: self.nodes[matching_string[0]].insert(snake_case__ ) def lowercase__ ( self , snake_case__ ): """simple docstring""" lowerCAmelCase : str = self.nodes.get(word[0] , snake_case__ ) if not incoming_node: return False else: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = incoming_node.match( snake_case__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # This applies when the word and the prefix are equal elif remaining_word == "": return incoming_node.is_leaf # We have word remaining so we check the next node else: return incoming_node.find(snake_case__ ) def lowercase__ ( self , snake_case__ ): """simple docstring""" lowerCAmelCase : int = self.nodes.get(word[0] , snake_case__ ) if not incoming_node: return False else: lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = incoming_node.match( snake_case__ ) # If there is remaining prefix, the word can't be on the tree if remaining_prefix != "": return False # We have word remaining so we check the next node elif remaining_word != "": return incoming_node.delete(snake_case__ ) else: # If it is not a leaf, we don't have to delete if not incoming_node.is_leaf: return False else: # We delete the nodes if no edges go from it if len(incoming_node.nodes ) == 0: del self.nodes[word[0]] # We merge the current node with its only child if len(self.nodes ) == 1 and not self.is_leaf: lowerCAmelCase : List[str] = list(self.nodes.values() )[0] lowerCAmelCase : List[str] = merging_node.is_leaf self.prefix += merging_node.prefix lowerCAmelCase : Optional[int] = merging_node.nodes # If there is more than 1 edge, we just mark it as non-leaf elif len(incoming_node.nodes ) > 1: lowerCAmelCase : Optional[int] = False # If there is 1 edge, we merge it with its child else: lowerCAmelCase : Optional[Any] = list(incoming_node.nodes.values() )[0] lowerCAmelCase : int = merging_node.is_leaf incoming_node.prefix += merging_node.prefix lowerCAmelCase : Tuple = merging_node.nodes return True def lowercase__ ( self , snake_case__ = 0 ): """simple docstring""" if self.prefix != "": print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" ) for value in self.nodes.values(): value.print_tree(height + 1 ) def a__ ( ): '''simple docstring''' lowerCAmelCase : Union[str, Any] = "banana bananas bandana band apple all beast".split() lowerCAmelCase : List[str] = RadixNode() root.insert_many(SCREAMING_SNAKE_CASE ) assert all(root.find(SCREAMING_SNAKE_CASE ) for word in words ) assert not root.find("bandanas" ) assert not root.find("apps" ) root.delete("all" ) assert not root.find("all" ) root.delete("banana" ) assert not root.find("banana" ) assert root.find("bananas" ) return True def a__ ( ): '''simple docstring''' assert test_trie() def a__ ( ): '''simple docstring''' lowerCAmelCase : Dict = RadixNode() lowerCAmelCase : Optional[Any] = "banana bananas bandanas bandana band apple all beast".split() root.insert_many(SCREAMING_SNAKE_CASE ) print("Words:" , SCREAMING_SNAKE_CASE ) print("Tree:" ) root.print_tree() if __name__ == "__main__": main()
108
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _snake_case = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def _snake_case ( UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ): # prepare kernel # the kernel size have to be odd if (ksize % 2) == 0: UpperCAmelCase : int = ksize + 1 UpperCAmelCase : Any = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(UpperCamelCase ): for x in range(UpperCamelCase ): # distance from center UpperCAmelCase : int = x - ksize // 2 UpperCAmelCase : Optional[int] = y - ksize // 2 # degree to radiant UpperCAmelCase : List[str] = theta / 180 * np.pi UpperCAmelCase : str = np.cos(_theta ) UpperCAmelCase : Optional[Any] = np.sin(_theta ) # get kernel x UpperCAmelCase : List[Any] = cos_theta * px + sin_theta * py # get kernel y UpperCAmelCase : Any = -sin_theta * px + cos_theta * py # fill kernel UpperCAmelCase : Any = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image A: Any = imread("../image_data/lena.jpg") # turn image in gray scale value A: Optional[int] = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges A: List[str] = np.zeros(gray.shape[:2]) for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]: A: List[str] = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) A: str = out / out.max() * 2_5_5 A: List[Any] = out.astype(np.uinta) imshow("Original", gray) imshow("Gabor filter with 20x20 mask and 6 directions", out) waitKey(0)
109
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ): '''simple docstring''' _a : Optional[Any] = [redshift, radiation_density, matter_density, dark_energy] if any(p < 0 for p in parameters ): raise ValueError("""All input parameters must be positive""" ) if any(p > 1 for p in parameters[1:4] ): raise ValueError("""Relative densities cannot be greater than one""" ) else: _a : Tuple = 1 - (matter_density + radiation_density + dark_energy) _a : int = ( radiation_density * (redshift + 1) ** 4 + matter_density * (redshift + 1) ** 3 + curvature * (redshift + 1) ** 2 + dark_energy ) _a : List[str] = hubble_constant * e_a ** (1 / 2) return hubble if __name__ == "__main__": import doctest # run doctest doctest.testmod() # demo LCDM approximation _snake_case = 0.3 print( hubble_parameter( hubble_constant=68.3, radiation_density=1e-4, matter_density=matter_density, dark_energy=1 - matter_density, redshift=0, ) )
294
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : Tuple = { 'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'], 'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'], 'processing_wav2vec2': ['Wav2Vec2Processor'], 'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Dict = [ 'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'Wav2Vec2ForAudioFrameClassification', 'Wav2Vec2ForCTC', 'Wav2Vec2ForMaskedLM', 'Wav2Vec2ForPreTraining', 'Wav2Vec2ForSequenceClassification', 'Wav2Vec2ForXVector', 'Wav2Vec2Model', 'Wav2Vec2PreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[Any] = [ 'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFWav2Vec2ForCTC', 'TFWav2Vec2Model', 'TFWav2Vec2PreTrainedModel', 'TFWav2Vec2ForSequenceClassification', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Dict = [ 'FlaxWav2Vec2ForCTC', 'FlaxWav2Vec2ForPreTraining', 'FlaxWav2Vec2Model', 'FlaxWav2Vec2PreTrainedModel', ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
347
"""simple docstring""" import shutil import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_tf_cross_test, require_tf, require_torch, require_torchvision, require_vision, ) from transformers.utils import is_tf_available, is_torch_available, is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, SamImageProcessor, SamProcessor if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : List[Any] ) -> Dict: _a : Optional[int] = tempfile.mkdtemp() _a : Optional[Any] = SamImageProcessor() _a : int = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Tuple , **UpperCAmelCase__ : Any ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : str ) -> int: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Tuple ) -> Dict: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : Optional[int] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Dict ) -> Dict: _a : List[Any] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : Tuple = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Tuple = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : Union[str, Any] ) -> Tuple: _a : Optional[Any] = self.get_image_processor() _a : int = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Union[str, Any] = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_torch def _lowercase ( self : Optional[Any] ) -> Optional[Any]: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = [torch.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : Optional[int] = [[683, 1024]] _a : List[Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : int = processor.post_process_masks( UpperCAmelCase__ , torch.tensor(UpperCAmelCase__ ) , torch.tensor(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : Optional[Any] = [np.ones((1, 3, 5, 5) )] _a : Tuple = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : List[str] = [[1, 0], [0, 1]] with self.assertRaises(UpperCAmelCase__ ): _a : str = processor.post_process_masks(UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) ) @require_vision @require_tf class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : Any ) -> List[str]: _a : List[str] = tempfile.mkdtemp() _a : Any = SamImageProcessor() _a : Union[str, Any] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : List[str] , **UpperCAmelCase__ : Any ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Optional[Any] ) -> Union[str, Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Dict ) -> List[str]: _a : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : List[Any] = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: _a : Optional[int] = SamProcessor(image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _a : str = self.get_image_processor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> str: _a : Union[str, Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : int = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(images=UpperCAmelCase__ , return_tensors="""np""" ) input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) @require_tf def _lowercase ( self : Optional[Any] ) -> int: _a : Optional[Any] = self.get_image_processor() _a : Dict = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Any = [tf.ones((1, 3, 5, 5) )] _a : Tuple = [[1764, 2646]] _a : str = [[683, 1024]] _a : Union[str, Any] = processor.post_process_masks(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Union[str, Any] = processor.post_process_masks( UpperCAmelCase__ , tf.convert_to_tensor(UpperCAmelCase__ ) , tf.convert_to_tensor(UpperCAmelCase__ ) , return_tensors="""tf""" , ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) # should also work with np _a : List[Any] = [np.ones((1, 3, 5, 5) )] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) ) _a : Dict = [[1, 0], [0, 1]] with self.assertRaises(tf.errors.InvalidArgumentError ): _a : List[Any] = processor.post_process_masks( UpperCAmelCase__ , np.array(UpperCAmelCase__ ) , np.array(UpperCAmelCase__ ) , return_tensors="""tf""" ) @require_vision @require_torchvision class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : str ) -> Optional[Any]: _a : Optional[Any] = tempfile.mkdtemp() _a : Dict = SamImageProcessor() _a : List[str] = SamProcessor(UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) def _lowercase ( self : Any , **UpperCAmelCase__ : Dict ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase__ ).image_processor def _lowercase ( self : Tuple ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : str ) -> int: _a : str = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] _a : int = [Image.fromarray(np.moveaxis(UpperCAmelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs @is_pt_tf_cross_test def _lowercase ( self : int ) -> List[Any]: _a : Optional[Any] = self.get_image_processor() _a : Optional[Any] = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Tuple = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa ) _a : str = [tf.convert_to_tensor(UpperCAmelCase__ )] _a : Optional[int] = [torch.tensor(UpperCAmelCase__ )] _a : Union[str, Any] = [[1764, 2646]] _a : List[str] = [[683, 1024]] _a : Optional[int] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""tf""" ) _a : List[str] = processor.post_process_masks( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , return_tensors="""pt""" ) self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) ) @is_pt_tf_cross_test def _lowercase ( self : str ) -> Optional[Any]: _a : List[Any] = self.get_image_processor() _a : Any = SamProcessor(image_processor=UpperCAmelCase__ ) _a : Dict = self.prepare_image_inputs() _a : List[str] = image_processor(UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : str = processor(images=UpperCAmelCase__ , return_tensors="""pt""" )["""pixel_values"""].numpy() _a : Optional[Any] = image_processor(UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() _a : Optional[int] = processor(images=UpperCAmelCase__ , return_tensors="""tf""" )["""pixel_values"""].numpy() self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) ) self.assertTrue(np.allclose(UpperCAmelCase__ , UpperCAmelCase__ ) )
294
0
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs UpperCAmelCase : Optional[Any] = imread(r"digital_image_processing/image_data/lena_small.jpg") UpperCAmelCase : Optional[int] = cvtColor(img, COLOR_BGR2GRAY) def _SCREAMING_SNAKE_CASE () -> List[str]: '''simple docstring''' lowercase_ = cn.convert_to_negative(UpperCamelCase__ ) # assert negative_img array for at least one True assert negative_img.any() def _SCREAMING_SNAKE_CASE () -> str: '''simple docstring''' with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(UpperCamelCase__ , 1_10 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def _SCREAMING_SNAKE_CASE () -> Tuple: '''simple docstring''' lowercase_ = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _SCREAMING_SNAKE_CASE () -> List[str]: '''simple docstring''' lowercase_ = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() lowercase_ = canny.canny(UpperCamelCase__ ) # assert canny array for at least one True assert canny_array.any() def _SCREAMING_SNAKE_CASE () -> str: '''simple docstring''' assert gg.gaussian_filter(UpperCamelCase__ , 5 , sigma=0.9 ).all() def _SCREAMING_SNAKE_CASE () -> List[Any]: '''simple docstring''' lowercase_ = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) lowercase_ = conv.img_convolve(UpperCamelCase__ , UpperCamelCase__ ).astype(UpperCamelCase__ ) assert res.any() def _SCREAMING_SNAKE_CASE () -> Any: '''simple docstring''' assert med.median_filter(UpperCamelCase__ , 3 ).any() def _SCREAMING_SNAKE_CASE () -> List[Any]: '''simple docstring''' lowercase_ = sob.sobel_filter(UpperCamelCase__ ) assert grad.any() and theta.any() def _SCREAMING_SNAKE_CASE () -> Any: '''simple docstring''' lowercase_ = sp.make_sepia(UpperCamelCase__ , 20 ) assert sepia.all() def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" ) -> Any: '''simple docstring''' lowercase_ = bs.Burkes(imread(UpperCamelCase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _SCREAMING_SNAKE_CASE (__lowerCAmelCase = "digital_image_processing/image_data/lena_small.jpg" , ) -> int: '''simple docstring''' lowercase_ = rs.NearestNeighbour(imread(UpperCamelCase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _SCREAMING_SNAKE_CASE () -> int: '''simple docstring''' lowercase_ = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. lowercase_ = imread(UpperCamelCase__ , 0 ) # Test for get_neighbors_pixel function() return not None lowercase_ = 0 lowercase_ = 0 lowercase_ = image[x_coordinate][y_coordinate] lowercase_ = lbp.get_neighbors_pixel( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image lowercase_ = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): lowercase_ = lbp.local_binary_value(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) assert lbp_image.any()
136
"""simple docstring""" import argparse import gc import json import os import re import torch from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint _snake_case = { '169M': 12, '430M': 24, '1B5': 24, '3B': 32, '7B': 32, '14B': 40, } _snake_case = { '169M': 768, '430M': 1024, '1B5': 2048, '3B': 2560, '7B': 4096, '14B': 5120, } def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : int = list(state_dict.keys() ) for name in state_dict_keys: _a : str = state_dict.pop(UpperCamelCase__ ) # emb -> embedding if name.startswith("""emb.""" ): _a : Dict = name.replace("""emb.""" , """embeddings.""" ) # ln_0 -> pre_ln (only present at block 0) if name.startswith("""blocks.0.ln0""" ): _a : Dict = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" ) # att -> attention _a : Any = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCamelCase__ ) # ffn -> feed_forward _a : int = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCamelCase__ ) # time_mix_k -> time_mix_key and reshape if name.endswith(""".time_mix_k""" ): _a : List[str] = name.replace(""".time_mix_k""" , """.time_mix_key""" ) # time_mix_v -> time_mix_value and reshape if name.endswith(""".time_mix_v""" ): _a : Tuple = name.replace(""".time_mix_v""" , """.time_mix_value""" ) # time_mix_r -> time_mix_key and reshape if name.endswith(""".time_mix_r""" ): _a : Dict = name.replace(""".time_mix_r""" , """.time_mix_receptance""" ) if name != "head.weight": _a : Optional[int] = """rwkv.""" + name _a : Any = weight return state_dict def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=False , UpperCamelCase__=None ): '''simple docstring''' # 1. If possible, build the tokenizer. if tokenizer_file is None: print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" ) _a : Tuple = 5_0_2_7_7 _a : str = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" ) else: _a : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase__ ) _a : int = len(UpperCamelCase__ ) tokenizer.save_pretrained(UpperCamelCase__ ) # 2. Build the config _a : Optional[Any] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() ) if size is None: # Try to infer size from the checkpoint name for candidate in possible_sizes: if candidate in checkpoint_file: _a : Tuple = candidate break if size is None: raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" ) if size not in possible_sizes: raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" ) _a : List[Any] = RwkvConfig( vocab_size=UpperCamelCase__ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , ) config.save_pretrained(UpperCamelCase__ ) # 3. Download model file then convert state_dict _a : str = hf_hub_download(UpperCamelCase__ , UpperCamelCase__ ) _a : int = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : List[str] = convert_state_dict(UpperCamelCase__ ) # 4. Split in shards and save _a , _a : List[str] = shard_checkpoint(UpperCamelCase__ ) for shard_file, shard in shards.items(): torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if index is not None: _a : Optional[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) # Save the index as well with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f: _a : Dict = json.dumps(UpperCamelCase__ , indent=2 , sort_keys=UpperCamelCase__ ) + """\n""" f.write(UpperCamelCase__ ) # 5. Clean up shards (for some reason the file PyTorch saves take the same space as the whole state_dict print( """Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model.""" ) _a : List[Any] = list(shards.keys() ) del state_dict del shards gc.collect() for shard_file in shard_files: _a : Any = torch.load(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) del state_dict gc.collect() if push_to_hub: if model_name is None: raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" ) _a : Dict = AutoModelForCausalLM.from_pretrained(UpperCamelCase__ ) model.push_to_hub(UpperCamelCase__ , max_shard_size="""2GB""" ) tokenizer.push_to_hub(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.' ) parser.add_argument( '--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.' ) parser.add_argument( '--output_dir', default=None, type=str, required=True, help='Where to save the converted model.' ) parser.add_argument( '--tokenizer_file', default=None, type=str, help='Path to the tokenizer file to use (if not provided, only the model is converted).', ) parser.add_argument( '--size', default=None, type=str, help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Push to the Hub the converted model.', ) parser.add_argument( '--model_name', default=None, type=str, help='Name of the pushed model on the Hub, including the username / organization.', ) _snake_case = parser.parse_args() convert_rmkv_checkpoint_to_hf_format( args.repo_id, args.checkpoint_file, args.output_dir, size=args.size, tokenizer_file=args.tokenizer_file, push_to_hub=args.push_to_hub, model_name=args.model_name, )
294
0
from __future__ import annotations from fractions import Fraction from math import gcd, sqrt def A__ ( SCREAMING_SNAKE_CASE__) -> Tuple: __snake_case: int = int(number**0.5) return number == sq * sq def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Dict: __snake_case: int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den __snake_case: int = x_den * y_den * z_den __snake_case: int = gcd(UpperCamelCase__ , UpperCamelCase__) top //= hcf bottom //= hcf return top, bottom def A__ ( SCREAMING_SNAKE_CASE__ = 35) -> Tuple: __snake_case: set = set() __snake_case: int __snake_case: Fraction = Fraction(0) __snake_case: tuple[int, int] for x_num in range(1 , order + 1): for x_den in range(x_num + 1 , order + 1): for y_num in range(1 , order + 1): for y_den in range(y_num + 1 , order + 1): # n=1 __snake_case: Union[str, Any] = x_num * y_den + x_den * y_num __snake_case: List[str] = x_den * y_den __snake_case: Dict = gcd(UpperCamelCase__ , UpperCamelCase__) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case: Any = add_three( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) unique_s.add(UpperCamelCase__) # n=2 __snake_case: Optional[Any] = ( x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num ) __snake_case: Any = x_den * x_den * y_den * y_den if is_sq(UpperCamelCase__) and is_sq(UpperCamelCase__): __snake_case: int = int(sqrt(UpperCamelCase__)) __snake_case: List[Any] = int(sqrt(UpperCamelCase__)) __snake_case: Optional[Any] = gcd(UpperCamelCase__ , UpperCamelCase__) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case: Tuple = add_three( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) unique_s.add(UpperCamelCase__) # n=-1 __snake_case: Optional[int] = x_num * y_num __snake_case: List[str] = x_den * y_num + x_num * y_den __snake_case: Tuple = gcd(UpperCamelCase__ , UpperCamelCase__) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case: List[Any] = add_three( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) unique_s.add(UpperCamelCase__) # n=2 __snake_case: List[Any] = x_num * x_num * y_num * y_num __snake_case: str = ( x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den ) if is_sq(UpperCamelCase__) and is_sq(UpperCamelCase__): __snake_case: Dict = int(sqrt(UpperCamelCase__)) __snake_case: Any = int(sqrt(UpperCamelCase__)) __snake_case: List[Any] = gcd(UpperCamelCase__ , UpperCamelCase__) z_num //= hcf z_den //= hcf if 0 < z_num < z_den <= order: __snake_case: List[str] = add_three( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) unique_s.add(UpperCamelCase__) for num, den in unique_s: total += Fraction(UpperCamelCase__ , UpperCamelCase__) return total.denominator + total.numerator if __name__ == "__main__": print(f'{solution() = }')
111
"""simple docstring""" import shutil import tempfile import unittest from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast from transformers.testing_utils import require_sentencepiece, require_torchaudio from .test_feature_extraction_clap import floats_list @require_torchaudio @require_sentencepiece class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> List[str]: _a : Any = """laion/clap-htsat-unfused""" _a : Union[str, Any] = tempfile.mkdtemp() def _lowercase ( self : List[Any] , **UpperCAmelCase__ : Any ) -> Dict: return RobertaTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] , **UpperCAmelCase__ : List[str] ) -> int: return ClapFeatureExtractor.from_pretrained(self.checkpoint , **UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> Tuple: shutil.rmtree(self.tmpdirname ) def _lowercase ( self : List[str] ) -> Optional[int]: _a : List[str] = self.get_tokenizer() _a : Any = self.get_feature_extractor() _a : Optional[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) processor.save_pretrained(self.tmpdirname ) _a : List[str] = ClapProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> Optional[int]: _a : Tuple = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() ) processor.save_pretrained(self.tmpdirname ) _a : Dict = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) _a : Union[str, Any] = self.get_feature_extractor(do_normalize=UpperCAmelCase__ , padding_value=1.0 ) _a : Union[str, Any] = ClapProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCAmelCase__ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase__ ) self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCAmelCase__ ) def _lowercase ( self : List[str] ) -> Optional[Any]: _a : Optional[int] = self.get_feature_extractor() _a : Tuple = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = floats_list((3, 1000) ) _a : List[Any] = feature_extractor(UpperCAmelCase__ , return_tensors="""np""" ) _a : List[str] = processor(audios=UpperCAmelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Tuple ) -> Optional[int]: _a : List[str] = self.get_feature_extractor() _a : Any = self.get_tokenizer() _a : Any = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Optional[int] = """This is a test string""" _a : Tuple = processor(text=UpperCAmelCase__ ) _a : int = tokenizer(UpperCAmelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : List[Any] ) -> Any: _a : str = self.get_feature_extractor() _a : List[str] = self.get_tokenizer() _a : List[Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) _a : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _a : Dict = processor.batch_decode(UpperCAmelCase__ ) _a : Any = tokenizer.batch_decode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : Dict ) -> List[str]: _a : str = self.get_feature_extractor() _a : Optional[Any] = self.get_tokenizer() _a : Union[str, Any] = ClapProcessor(tokenizer=UpperCAmelCase__ , feature_extractor=UpperCAmelCase__ ) self.assertListEqual( processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
294
0
import inspect from typing import List, Optional, Tuple, Union import numpy as np import PIL import torch import torch.utils.checkpoint from ...models import UNetaDModel, VQModel from ...schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ...utils import PIL_INTERPOLATION, randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput def _A ( SCREAMING_SNAKE_CASE__ : int ): UpperCamelCase :List[str] = image.size UpperCamelCase :Optional[Any] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32 UpperCamelCase :str = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] ) UpperCamelCase :List[str] = np.array(UpperCamelCase__ ).astype(np.floataa ) / 2_55.0 UpperCamelCase :Dict = image[None].transpose(0 , 3 , 1 , 2 ) UpperCamelCase :List[Any] = torch.from_numpy(UpperCamelCase__ ) return 2.0 * image - 1.0 class UpperCAmelCase_ ( snake_case_ ): """simple docstring""" def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> str: super().__init__() self.register_modules(vqvae=UpperCAmelCase__ , unet=UpperCAmelCase__ , scheduler=UpperCAmelCase__ ) @torch.no_grad() def __call__( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 100 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = "pil" , SCREAMING_SNAKE_CASE_ = True , ) -> Union[Tuple, ImagePipelineOutput]: if isinstance(UpperCAmelCase__ , PIL.Image.Image ): UpperCamelCase :str = 1 elif isinstance(UpperCAmelCase__ , torch.Tensor ): UpperCamelCase :int = image.shape[0] else: raise ValueError(F'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(UpperCAmelCase__ )}''' ) if isinstance(UpperCAmelCase__ , PIL.Image.Image ): UpperCamelCase :List[Any] = preprocess(UpperCAmelCase__ ) UpperCamelCase :Tuple = image.shape[-2:] # in_channels should be 6: 3 for latents, 3 for low resolution image UpperCamelCase :List[str] = (batch_size, self.unet.config.in_channels // 2, height, width) UpperCamelCase :Union[str, Any] = next(self.unet.parameters() ).dtype UpperCamelCase :List[str] = randn_tensor(UpperCAmelCase__ , generator=UpperCAmelCase__ , device=self.device , dtype=UpperCAmelCase__ ) UpperCamelCase :Union[str, Any] = image.to(device=self.device , dtype=UpperCAmelCase__ ) # set timesteps and move to the correct device self.scheduler.set_timesteps(UpperCAmelCase__ , device=self.device ) UpperCamelCase :int = self.scheduler.timesteps # scale the initial noise by the standard deviation required by the scheduler UpperCamelCase :Any = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature. # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCamelCase :Optional[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCamelCase :Optional[Any] = {} if accepts_eta: UpperCamelCase :Dict = eta for t in self.progress_bar(UpperCAmelCase__ ): # concat latents and low resolution image in the channel dimension. UpperCamelCase :Optional[Any] = torch.cat([latents, image] , dim=1 ) UpperCamelCase :List[Any] = self.scheduler.scale_model_input(UpperCAmelCase__ , UpperCAmelCase__ ) # predict the noise residual UpperCamelCase :List[str] = self.unet(UpperCAmelCase__ , UpperCAmelCase__ ).sample # compute the previous noisy sample x_t -> x_t-1 UpperCamelCase :int = self.scheduler.step(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ ).prev_sample # decode the image latents with the VQVAE UpperCamelCase :int = self.vqvae.decode(UpperCAmelCase__ ).sample UpperCamelCase :Dict = torch.clamp(UpperCAmelCase__ , -1.0 , 1.0 ) UpperCamelCase :Dict = image / 2 + 0.5 UpperCamelCase :List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": UpperCamelCase :Optional[int] = self.numpy_to_pil(UpperCAmelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase__ )
259
"""simple docstring""" import warnings from ...utils import logging from .image_processing_clip import CLIPImageProcessor _snake_case = logging.get_logger(__name__) class UpperCamelCase ( snake_case_ ): def __init__( self : Any , *UpperCAmelCase__ : Optional[Any] , **UpperCAmelCase__ : Tuple ) -> None: warnings.warn( """The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use CLIPImageProcessor instead.""" , UpperCAmelCase__ , ) super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
294
0
import os import time import pytest from datasets.utils.filelock import FileLock, Timeout def lowerCAmelCase_ ( __A ) -> str: '''simple docstring''' UpperCAmelCase__ = FileLock(str(tmpdir / "foo.lock" ) ) UpperCAmelCase__ = FileLock(str(tmpdir / "foo.lock" ) ) UpperCAmelCase__ = 0.01 with locka.acquire(): with pytest.raises(UpperCamelCase__ ): UpperCAmelCase__ = time.time() locka.acquire(UpperCamelCase__ ) assert time.time() - _start > timeout def lowerCAmelCase_ ( __A ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ = """a""" * 1_000 + """.lock""" UpperCAmelCase__ = FileLock(str(tmpdir / filename ) ) assert locka._lock_file.endswith(".lock" ) assert not locka._lock_file.endswith(UpperCamelCase__ ) assert len(os.path.basename(locka._lock_file ) ) <= 255 UpperCAmelCase__ = FileLock(tmpdir / filename ) with locka.acquire(): with pytest.raises(UpperCamelCase__ ): locka.acquire(0 )
65
"""simple docstring""" import unittest import numpy as np def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , ): '''simple docstring''' _a : List[Any] = np.shape(UpperCamelCase__ ) _a : Any = np.shape(UpperCamelCase__ ) _a : Union[str, Any] = np.shape(UpperCamelCase__ ) if shape_a[0] != shape_b[0]: _a : int = ( """Expected the same number of rows for A and B. """ F"""Instead found A of size {shape_a} and B of size {shape_b}""" ) raise ValueError(UpperCamelCase__ ) if shape_b[1] != shape_c[1]: _a : Tuple = ( """Expected the same number of columns for B and C. """ F"""Instead found B of size {shape_b} and C of size {shape_c}""" ) raise ValueError(UpperCamelCase__ ) _a : int = pseudo_inv if a_inv is None: try: _a : Optional[int] = np.linalg.inv(UpperCamelCase__ ) except np.linalg.LinAlgError: raise ValueError( """Input matrix A is not invertible. Cannot compute Schur complement.""" ) return mat_c - mat_b.T @ a_inv @ mat_b class UpperCamelCase ( unittest.TestCase ): def _lowercase ( self : int ) -> None: _a : str = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Tuple = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Optional[int] = np.array([[2, 1], [6, 3]] ) _a : Optional[Any] = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) _a : Union[str, Any] = np.block([[a, b], [b.T, c]] ) _a : int = np.linalg.det(UpperCAmelCase__ ) _a : Union[str, Any] = np.linalg.det(UpperCAmelCase__ ) _a : List[Any] = np.linalg.det(UpperCAmelCase__ ) self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s ) def _lowercase ( self : int ) -> None: _a : Optional[int] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Optional[int] = np.array([[0, 3], [3, 0], [2, 3]] ) _a : Union[str, Any] = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def _lowercase ( self : List[Any] ) -> None: _a : Any = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a : Dict = np.array([[0, 3], [3, 0], [2, 3]] ) _a : List[Any] = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
294
0
'''simple docstring''' from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging A_ = logging.get_logger(__name__) A_ = { "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json", # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class _snake_case ( snake_case_ ): _A : Optional[Any] = '''gptj''' _A : Optional[Any] = { '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[int]=50_400 ,SCREAMING_SNAKE_CASE__ : Tuple=2_048 ,SCREAMING_SNAKE_CASE__ : Optional[int]=4_096 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=28 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=16 ,SCREAMING_SNAKE_CASE__ : str=64 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=None ,SCREAMING_SNAKE_CASE__ : Tuple="gelu_new" ,SCREAMING_SNAKE_CASE__ : Any=0.0 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.0 ,SCREAMING_SNAKE_CASE__ : List[Any]=1e-5 ,SCREAMING_SNAKE_CASE__ : List[str]=0.02 ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=50_256 ,SCREAMING_SNAKE_CASE__ : Dict=50_256 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=False ,**SCREAMING_SNAKE_CASE__ : Optional[Any] ,): SCREAMING_SNAKE_CASE:Optional[Any] = vocab_size SCREAMING_SNAKE_CASE:Union[str, Any] = n_positions SCREAMING_SNAKE_CASE:Any = n_embd SCREAMING_SNAKE_CASE:List[Any] = n_layer SCREAMING_SNAKE_CASE:str = n_head SCREAMING_SNAKE_CASE:str = n_inner SCREAMING_SNAKE_CASE:Dict = rotary_dim SCREAMING_SNAKE_CASE:List[str] = activation_function SCREAMING_SNAKE_CASE:Optional[Any] = resid_pdrop SCREAMING_SNAKE_CASE:Any = embd_pdrop SCREAMING_SNAKE_CASE:int = attn_pdrop SCREAMING_SNAKE_CASE:Union[str, Any] = layer_norm_epsilon SCREAMING_SNAKE_CASE:Dict = initializer_range SCREAMING_SNAKE_CASE:Dict = use_cache SCREAMING_SNAKE_CASE:Dict = bos_token_id SCREAMING_SNAKE_CASE:int = eos_token_id super().__init__( bos_token_id=UpperCAmelCase__ ,eos_token_id=UpperCAmelCase__ ,tie_word_embeddings=UpperCAmelCase__ ,**UpperCAmelCase__ ) class _snake_case ( snake_case_ ): def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : PretrainedConfig ,SCREAMING_SNAKE_CASE__ : str = "default" ,SCREAMING_SNAKE_CASE__ : List[PatchingSpec] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,): super().__init__(UpperCAmelCase__ ,task=UpperCAmelCase__ ,patching_specs=UpperCAmelCase__ ,use_past=UpperCAmelCase__ ) if not getattr(self._config ,"pad_token_id" ,UpperCAmelCase__ ): # TODO: how to do that better? SCREAMING_SNAKE_CASE:List[Any] = 0 @property def __UpperCamelCase ( self : Dict ): SCREAMING_SNAKE_CASE:Tuple = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase__ ,direction="inputs" ) SCREAMING_SNAKE_CASE:Optional[Any] = {0: """batch""", 1: """past_sequence + sequence"""} else: SCREAMING_SNAKE_CASE:Union[str, Any] = {0: """batch""", 1: """sequence"""} return common_inputs @property def __UpperCamelCase ( self : Optional[Any] ): return self._config.n_layer @property def __UpperCamelCase ( self : List[Any] ): return self._config.n_head def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : PreTrainedTokenizer ,SCREAMING_SNAKE_CASE__ : int = -1 ,SCREAMING_SNAKE_CASE__ : int = -1 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[TensorType] = None ,): SCREAMING_SNAKE_CASE:Tuple = super(UpperCAmelCase__ ,self ).generate_dummy_inputs( UpperCAmelCase__ ,batch_size=UpperCAmelCase__ ,seq_length=UpperCAmelCase__ ,is_pair=UpperCAmelCase__ ,framework=UpperCAmelCase__ ) # We need to order the input in the way they appears in the forward() SCREAMING_SNAKE_CASE:str = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch SCREAMING_SNAKE_CASE:int = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values SCREAMING_SNAKE_CASE:Optional[int] = seqlen + 2 SCREAMING_SNAKE_CASE:Union[str, Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) SCREAMING_SNAKE_CASE:str = [ (torch.zeros(UpperCAmelCase__ ), torch.zeros(UpperCAmelCase__ )) for _ in range(self.num_layers ) ] SCREAMING_SNAKE_CASE:int = common_inputs["""attention_mask"""] if self.use_past: SCREAMING_SNAKE_CASE:Tuple = ordered_inputs["""attention_mask"""].dtype SCREAMING_SNAKE_CASE:Optional[Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(UpperCAmelCase__ ,UpperCAmelCase__ ,dtype=UpperCAmelCase__ )] ,dim=1 ) return ordered_inputs @property def __UpperCamelCase ( self : List[Any] ): return 13
139
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() _snake_case = logging.get_logger(__name__) _snake_case = torch.device('cpu') def lowerCAmelCase__ ( ): '''simple docstring''' _a : int = """http://images.cocodataset.org/val2017/000000039769.jpg""" _a : Dict = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.17_03e00, 2.11_07e00, -2.08_11e00, 8.86_85e-01, 2.43_60e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.96_36e-01, 2.34_78e-01, -1.69_63e00, -1.73_81e00, -8.63_37e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.27_68e-01, -4.74_29e-01, -1.08_97e00, -1.02_48e00, 3.55_23e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.53_30e-01, 2.42_11e-01, -6.01_85e-01, -8.27_89e-01, -6.04_46e-02] ) def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Any = dct.pop(UpperCamelCase__ ) _a : Dict = val def lowerCAmelCase__ ( UpperCamelCase__ ): '''simple docstring''' _a : Tuple = [] for k in state_dict.keys(): _a : Any = k if ".pwconv" in k: _a : int = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: _a : List[str] = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: _a : Optional[int] = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: _a : Tuple = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: _a : int = k_new.split(""".""" ) if ls[2].isdigit(): _a : Union[str, Any] = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: _a : Tuple = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' _a : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size _a : Optional[int] = 1_0_0_0 _a : Optional[Any] = """huggingface/label-files""" _a : Optional[Any] = """imagenet-1k-id2label.json""" _a : List[str] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) ) _a : Optional[Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _a : Dict = idalabel _a : Optional[int] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": _a : Any = [3, 3, 6, 4] _a : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": _a : Any = [3, 3, 9, 6] _a : List[str] = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": _a : List[Any] = [4, 3, 1_0, 5] _a : Optional[int] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": _a : List[Any] = [4, 4, 1_2, 6] _a : Optional[Any] = [6_4, 1_2_8, 3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): _a : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" , check_hash=UpperCamelCase__ ) else: _a : Dict = torch.load(UpperCamelCase__ , map_location="""cpu""" ) _a : int = checkpoint _a : Optional[Any] = create_rename_keys(UpperCamelCase__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # load HuggingFace model _a : Any = SwiftFormerForImageClassification(UpperCamelCase__ ).eval() hf_model.load_state_dict(UpperCamelCase__ ) # prepare test inputs _a : Any = prepare_img() _a : Union[str, Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) _a : Optional[int] = processor(images=UpperCamelCase__ , return_tensors="""pt""" ) # compare outputs from both models _a : Dict = get_expected_output(UpperCamelCase__ ) _a : int = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , UpperCamelCase__ , atol=1e-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _snake_case = argparse.ArgumentParser() # Required parameters parser.add_argument( '--swiftformer_name', default='swiftformer_xs', choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'], type=str, help='Name of the SwiftFormer model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default='./converted_outputs/', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.') _snake_case = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
294
0
import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class lowercase__ ( snake_case_, unittest.TestCase ): a_ =BertTokenizer a_ =BertTokenizerFast a_ =True a_ =True a_ =filter_non_english def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' super().setUp() lowerCAmelCase__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] lowerCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) ) def UpperCAmelCase ( self , __UpperCAmelCase )-> Any: '''simple docstring''' lowerCAmelCase__ = """UNwant\u00E9d,running""" lowerCAmelCase__ = """unwanted, running""" return input_text, output_text def UpperCAmelCase ( self )-> Optional[Any]: '''simple docstring''' lowerCAmelCase__ = self.tokenizer_class(self.vocab_file ) lowerCAmelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" ) self.assertListEqual(UpperCAmelCase__ , ["un", "##want", "##ed", ",", "runn", "##ing"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [9, 6, 7, 12, 10, 11] ) def UpperCAmelCase ( self )-> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = self.get_rust_tokenizer() lowerCAmelCase__ = """UNwant\u00E9d,running""" lowerCAmelCase__ = tokenizer.tokenize(UpperCAmelCase__ ) lowerCAmelCase__ = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase__ = self.get_rust_tokenizer() lowerCAmelCase__ = tokenizer.encode(UpperCAmelCase__ ) lowerCAmelCase__ = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # With lower casing lowerCAmelCase__ = self.get_tokenizer(do_lower_case=UpperCAmelCase__ ) lowerCAmelCase__ = self.get_rust_tokenizer(do_lower_case=UpperCAmelCase__ ) lowerCAmelCase__ = """UNwant\u00E9d,running""" lowerCAmelCase__ = tokenizer.tokenize(UpperCAmelCase__ ) lowerCAmelCase__ = rust_tokenizer.tokenize(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = rust_tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase__ = self.get_rust_tokenizer() lowerCAmelCase__ = tokenizer.encode(UpperCAmelCase__ ) lowerCAmelCase__ = rust_tokenizer.encode(UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) def UpperCAmelCase ( self )-> List[str]: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] ) def UpperCAmelCase ( self )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] ) def UpperCAmelCase ( self )-> Dict: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase ( self )-> Dict: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] ) def UpperCAmelCase ( self )-> List[str]: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase ( self )-> int: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase ( self )-> Union[str, Any]: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ , strip_accents=UpperCAmelCase__ ) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer(do_lower_case=UpperCAmelCase__ , never_split=["[UNK]"] ) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def UpperCAmelCase ( self )-> int: '''simple docstring''' lowerCAmelCase__ = BasicTokenizer() lowerCAmelCase__ = """a\n'll !!to?'d of, can't.""" lowerCAmelCase__ = ["""a""", """'""", """ll""", """!""", """!""", """to""", """?""", """'""", """d""", """of""", """,""", """can""", """'""", """t""", """."""] self.assertListEqual(tokenizer.tokenize(UpperCAmelCase__ ) , UpperCAmelCase__ ) def UpperCAmelCase ( self )-> Dict: '''simple docstring''' lowerCAmelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""] lowerCAmelCase__ = {} for i, token in enumerate(UpperCAmelCase__ ): lowerCAmelCase__ = i lowerCAmelCase__ = WordpieceTokenizer(vocab=UpperCAmelCase__ , unk_token="[UNK]" ) self.assertListEqual(tokenizer.tokenize("" ) , [] ) self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] ) self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] ) def UpperCAmelCase ( self )-> List[str]: '''simple docstring''' self.assertTrue(_is_whitespace(" " ) ) self.assertTrue(_is_whitespace("\t" ) ) self.assertTrue(_is_whitespace("\r" ) ) self.assertTrue(_is_whitespace("\n" ) ) self.assertTrue(_is_whitespace("\u00A0" ) ) self.assertFalse(_is_whitespace("A" ) ) self.assertFalse(_is_whitespace("-" ) ) def UpperCAmelCase ( self )-> str: '''simple docstring''' self.assertTrue(_is_control("\u0005" ) ) self.assertFalse(_is_control("A" ) ) self.assertFalse(_is_control(" " ) ) self.assertFalse(_is_control("\t" ) ) self.assertFalse(_is_control("\r" ) ) def UpperCAmelCase ( self )-> str: '''simple docstring''' self.assertTrue(_is_punctuation("-" ) ) self.assertTrue(_is_punctuation("$" ) ) self.assertTrue(_is_punctuation("`" ) ) self.assertTrue(_is_punctuation("." ) ) self.assertFalse(_is_punctuation("A" ) ) self.assertFalse(_is_punctuation(" " ) ) def UpperCAmelCase ( self )-> Tuple: '''simple docstring''' lowerCAmelCase__ = self.get_tokenizer() lowerCAmelCase__ = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) self.assertListEqual( [rust_tokenizer.tokenize(UpperCAmelCase__ ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] ) @slow def UpperCAmelCase ( self )-> Optional[int]: '''simple docstring''' lowerCAmelCase__ = self.tokenizer_class.from_pretrained("bert-base-uncased" ) lowerCAmelCase__ = tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase__ , UpperCAmelCase__ ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def UpperCAmelCase ( self )-> int: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase__ = F"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." lowerCAmelCase__ = tokenizer_r.encode_plus( UpperCAmelCase__ , return_attention_mask=UpperCAmelCase__ , return_token_type_ids=UpperCAmelCase__ , return_offsets_mapping=UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , ) lowerCAmelCase__ = tokenizer_r.do_lower_case if hasattr(UpperCAmelCase__ , "do_lower_case" ) else False lowerCAmelCase__ = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """A"""), ((1, 2), ""","""), ((3, 5), """na"""), ((5, 6), """##ï"""), ((6, 8), """##ve"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """Allen"""), ((21, 23), """##NL"""), ((23, 24), """##P"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), """a"""), ((1, 2), ""","""), ((3, 8), """naive"""), ((9, 15), tokenizer_r.mask_token), ((16, 21), """allen"""), ((21, 23), """##nl"""), ((23, 24), """##p"""), ((25, 33), """sentence"""), ((33, 34), """."""), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) ) self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] ) def UpperCAmelCase ( self )-> Any: '''simple docstring''' lowerCAmelCase__ = ["""的""", """人""", """有"""] lowerCAmelCase__ = """""".join(UpperCAmelCase__ ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ): lowerCAmelCase__ = True lowerCAmelCase__ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCAmelCase__ = False lowerCAmelCase__ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase__ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_r.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_p.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_r.convert_ids_to_tokens(UpperCAmelCase__ ) lowerCAmelCase__ = tokenizer_p.convert_ids_to_tokens(UpperCAmelCase__ ) # it is expected that only the first Chinese character is not preceded by "##". lowerCAmelCase__ = [ F"##{token}" if idx != 0 else token for idx, token in enumerate(UpperCAmelCase__ ) ] self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertListEqual(UpperCAmelCase__ , UpperCAmelCase__ )
340
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available, ) _snake_case = { 'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'], 'tokenization_perceiver': ['PerceiverTokenizer'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['PerceiverFeatureExtractor'] _snake_case = ['PerceiverImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = [ 'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST', 'PerceiverForImageClassificationConvProcessing', 'PerceiverForImageClassificationFourier', 'PerceiverForImageClassificationLearned', 'PerceiverForMaskedLM', 'PerceiverForMultimodalAutoencoding', 'PerceiverForOpticalFlow', 'PerceiverForSequenceClassification', 'PerceiverLayer', 'PerceiverModel', 'PerceiverPreTrainedModel', ] if TYPE_CHECKING: from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig from .tokenization_perceiver import PerceiverTokenizer try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_perceiver import PerceiverFeatureExtractor from .image_processing_perceiver import PerceiverImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_perceiver import ( PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST, PerceiverForImageClassificationConvProcessing, PerceiverForImageClassificationFourier, PerceiverForImageClassificationLearned, PerceiverForMaskedLM, PerceiverForMultimodalAutoencoding, PerceiverForOpticalFlow, PerceiverForSequenceClassification, PerceiverLayer, PerceiverModel, PerceiverPreTrainedModel, ) else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
294
0
"""simple docstring""" import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ : def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=24 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=6 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=1_000 , ): """simple docstring""" snake_case : Dict = parent snake_case : Dict = batch_size snake_case : Tuple = seq_length snake_case : List[str] = is_training snake_case : Optional[int] = use_input_mask snake_case : Any = use_token_type_ids snake_case : List[Any] = use_labels snake_case : Dict = vocab_size snake_case : Optional[Any] = hidden_size snake_case : Any = num_hidden_layers snake_case : Union[str, Any] = num_attention_heads snake_case : List[str] = intermediate_size snake_case : List[Any] = hidden_act snake_case : int = hidden_dropout_prob snake_case : List[Any] = attention_probs_dropout_prob snake_case : Optional[int] = max_position_embeddings snake_case : Union[str, Any] = type_vocab_size snake_case : int = type_sequence_label_size snake_case : Optional[Any] = initializer_range snake_case : Union[str, Any] = num_labels snake_case : List[Any] = scope snake_case : List[str] = range_bbox def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: snake_case : List[str] = bbox[i, j, 3] snake_case : List[Any] = bbox[i, j, 1] snake_case : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: snake_case : List[str] = bbox[i, j, 2] snake_case : Tuple = bbox[i, j, 0] snake_case : Tuple = t snake_case : Any = None if self.use_input_mask: snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) snake_case : Any = None if self.use_token_type_ids: snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case : Optional[Any] = None snake_case : Tuple = None if self.use_labels: snake_case : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case : Tuple = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def lowerCamelCase_ ( self ): """simple docstring""" return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ): """simple docstring""" snake_case : List[str] = LiltModel(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() snake_case : List[str] = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) snake_case : List[Any] = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ ) snake_case : Any = model(UpperCAmelCase__ , bbox=UpperCAmelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ): """simple docstring""" snake_case : str = self.num_labels snake_case : List[str] = LiltForTokenClassification(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() snake_case : Dict = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , ): """simple docstring""" snake_case : List[str] = LiltForQuestionAnswering(config=UpperCAmelCase__ ) model.to(UpperCAmelCase__ ) model.eval() snake_case : str = model( UpperCAmelCase__ , bbox=UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[str] = self.prepare_config_and_inputs() ( snake_case ) : Optional[Any] = config_and_inputs snake_case : str = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class lowerCamelCase__ ( snake_case_ , snake_case_ , snake_case_ , unittest.TestCase ): a__ : List[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) a__ : Tuple = ( { '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) a__ : Dict = False a__ : Union[str, Any] = False def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): """simple docstring""" return True def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Dict = LiltModelTester(self ) snake_case : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=37 ) def lowerCamelCase_ ( self ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case : str = type self.model_tester.create_and_check_model(*UpperCAmelCase__ ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase__ ) def lowerCamelCase_ ( self ): """simple docstring""" snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase__ ) @slow def lowerCamelCase_ ( self ): """simple docstring""" for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case : List[Any] = LiltModel.from_pretrained(UpperCAmelCase__ ) self.assertIsNotNone(UpperCAmelCase__ ) @require_torch @slow class lowerCamelCase__ ( unittest.TestCase ): def lowerCamelCase_ ( self ): """simple docstring""" snake_case : Tuple = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(UpperCAmelCase__ ) snake_case : Union[str, Any] = torch.tensor([[1, 2]] , device=UpperCAmelCase__ ) snake_case : List[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=UpperCAmelCase__ ) # forward pass with torch.no_grad(): snake_case : Union[str, Any] = model(input_ids=UpperCAmelCase__ , bbox=UpperCAmelCase__ ) snake_case : str = torch.Size([1, 2, 768] ) snake_case : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=UpperCAmelCase__ , ) self.assertTrue(outputs.last_hidden_state.shape , UpperCAmelCase__ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , UpperCAmelCase__ , atol=1E-3 ) )
148
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self : Optional[int] , UpperCAmelCase__ : float , UpperCAmelCase__ : int ) -> Dict: if k in (0.0_4, 0.0_6): _a : List[str] = k _a : List[Any] = window_size else: raise ValueError("""invalid k value""" ) def __str__( self : Dict ) -> str: return str(self.k ) def _lowercase ( self : int , UpperCAmelCase__ : str ) -> tuple[cva.Mat, list[list[int]]]: _a : Dict = cva.imread(UpperCAmelCase__ , 0 ) _a , _a : List[Any] = img.shape _a : list[list[int]] = [] _a : List[Any] = img.copy() _a : int = cva.cvtColor(UpperCAmelCase__ , cva.COLOR_GRAY2RGB ) _a , _a : Any = np.gradient(UpperCAmelCase__ ) _a : Tuple = dx**2 _a : Union[str, Any] = dy**2 _a : Union[str, Any] = dx * dy _a : int = 0.0_4 _a : List[str] = self.window_size // 2 for y in range(UpperCAmelCase__ , h - offset ): for x in range(UpperCAmelCase__ , w - offset ): _a : str = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : List[Any] = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Tuple = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _a : Any = (wxx * wyy) - (wxy**2) _a : Tuple = wxx + wyy _a : Any = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": _snake_case = HarrisCorner(0.04, 3) _snake_case , _snake_case = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
294
0
"""simple docstring""" import unittest import numpy as np def A_ ( _lowerCAmelCase : List[Any], _lowerCAmelCase : List[Any], _lowerCAmelCase : int, _lowerCAmelCase : Union[str, Any] = None, ): """simple docstring""" _a = np.shape(UpperCamelCase__ ) _a = np.shape(UpperCamelCase__ ) _a = np.shape(UpperCamelCase__ ) if shape_a[0] != shape_b[0]: _a = ( """Expected the same number of rows for A and B. """ f'Instead found A of size {shape_a} and B of size {shape_b}' ) raise ValueError(UpperCamelCase__ ) if shape_b[1] != shape_c[1]: _a = ( """Expected the same number of columns for B and C. """ f'Instead found B of size {shape_b} and C of size {shape_c}' ) raise ValueError(UpperCamelCase__ ) _a = pseudo_inv if a_inv is None: try: _a = np.linalg.inv(UpperCamelCase__ ) except np.linalg.LinAlgError: raise ValueError( '''Input matrix A is not invertible. Cannot compute Schur complement.''' ) return mat_c - mat_b.T @ a_inv @ mat_b class __lowerCamelCase ( unittest.TestCase ): '''simple docstring''' def _UpperCAmelCase ( self ) -> None: _a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a = np.array([[0, 3], [3, 0], [2, 3]] ) _a = np.array([[2, 1], [6, 3]] ) _a = schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) _a = np.block([[a, b], [b.T, c]] ) _a = np.linalg.det(UpperCAmelCase__ ) _a = np.linalg.det(UpperCAmelCase__ ) _a = np.linalg.det(UpperCAmelCase__ ) self.assertAlmostEqual(UpperCAmelCase__ , det_a * det_s ) def _UpperCAmelCase ( self ) -> None: _a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a = np.array([[0, 3], [3, 0], [2, 3]] ) _a = np.array([[2, 1], [6, 3]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def _UpperCAmelCase ( self ) -> None: _a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] ) _a = np.array([[0, 3], [3, 0], [2, 3]] ) _a = np.array([[2, 1, 3], [6, 3, 5]] ) with self.assertRaises(UpperCAmelCase__ ): schur_complement(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() unittest.main()
320
"""simple docstring""" def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ): '''simple docstring''' # Check if the input is valid if not len(UpperCamelCase__ ) == len(UpperCamelCase__ ) == 3: raise ValueError("""Please enter a valid equation.""" ) if equationa[0] == equationa[1] == equationa[0] == equationa[1] == 0: raise ValueError("""Both a & b of two equations can't be zero.""" ) # Extract the coefficients _a , _a , _a : Any = equationa _a , _a , _a : Tuple = equationa # Calculate the determinants of the matrices _a : int = aa * ba - aa * ba _a : str = ca * ba - ca * ba _a : str = aa * ca - aa * ca # Check if the system of linear equations has a solution (using Cramer's rule) if determinant == 0: if determinant_x == determinant_y == 0: raise ValueError("""Infinite solutions. (Consistent system)""" ) else: raise ValueError("""No solution. (Inconsistent system)""" ) else: if determinant_x == determinant_y == 0: # Trivial solution (Inconsistent system) return (0.0, 0.0) else: _a : Dict = determinant_x / determinant _a : str = determinant_y / determinant # Non-Trivial Solution (Consistent system) return (x, y)
294
0
"""simple docstring""" def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int: if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): raise ValueError("iterations must be defined as integers" ) if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or not number >= 1: raise ValueError( "starting number must be\n and integer and be more than 0" ) if not iterations >= 1: raise ValueError("Iterations must be done more than 0 times to play FizzBuzz" ) A__ = """""" while number <= iterations: if number % 3 == 0: out += "Fizz" if number % 5 == 0: out += "Buzz" if 0 not in (number % 3, number % 5): out += str(UpperCamelCase__ ) # print(out) number += 1 out += " " return out if __name__ == "__main__": import doctest doctest.testmod()
247
"""simple docstring""" _snake_case = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n' _snake_case = [{'type': 'code', 'content': INSTALL_CONTENT}] _snake_case = { '{processor_class}': 'FakeProcessorClass', '{model_class}': 'FakeModelClass', '{object_class}': 'FakeObjectClass', }
294
0
import shutil import tempfile import unittest from transformers import ( SPIECE_UNDERLINE, AddedToken, BatchEncoding, NllbTokenizer, NllbTokenizerFast, is_torch_available, ) from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE :List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right SCREAMING_SNAKE_CASE :Dict = 25_60_47 SCREAMING_SNAKE_CASE :Optional[int] = 25_61_45 @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( snake_case_ , unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = NllbTokenizer _SCREAMING_SNAKE_CASE = NllbTokenizerFast _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = {} def lowerCAmelCase__ ( self : Any ) -> Union[str, Any]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing snake_case_ = NllbTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCAmelCase__ ( self : List[str] ) -> Optional[int]: """simple docstring""" snake_case_ = NllbTokenizer(UpperCAmelCase__ , keep_accents=UpperCAmelCase__ ) snake_case_ = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCAmelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) snake_case_ = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) snake_case_ = tokenizer.convert_tokens_to_ids(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) snake_case_ = tokenizer.convert_ids_to_tokens(UpperCAmelCase__ ) self.assertListEqual( UpperCAmelCase__ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) def lowerCAmelCase__ ( self : Any ) -> List[Any]: """simple docstring""" snake_case_ = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) snake_case_ = self.tokenizer_class.from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ ) snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(UpperCAmelCase__ ) snake_case_ = tokenizer_p.save_pretrained(UpperCAmelCase__ ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) snake_case_ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f ) self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(UpperCAmelCase__ ) snake_case_ = tokenizer_p.from_pretrained(UpperCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) shutil.rmtree(UpperCAmelCase__ ) # Save tokenizer rust, legacy_format=True snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ ) snake_case_ = tokenizer_p.save_pretrained(UpperCAmelCase__ ) # Checks it save with the same files self.assertSequenceEqual(UpperCAmelCase__ , UpperCAmelCase__ ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(UpperCAmelCase__ ) snake_case_ = tokenizer_p.from_pretrained(UpperCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) shutil.rmtree(UpperCAmelCase__ ) # Save tokenizer rust, legacy_format=False snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(UpperCAmelCase__ , legacy_format=UpperCAmelCase__ ) snake_case_ = tokenizer_p.save_pretrained(UpperCAmelCase__ ) # Checks it saved the tokenizer.json file self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(UpperCAmelCase__ ) snake_case_ = tokenizer_p.from_pretrained(UpperCAmelCase__ ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(UpperCAmelCase__ , UpperCAmelCase__ ) ) shutil.rmtree(UpperCAmelCase__ ) @require_torch def lowerCAmelCase__ ( self : str ) -> List[str]: """simple docstring""" if not self.test_seqaseq: return snake_case_ = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Longer text that will definitely require truncation. snake_case_ = [ """ UN Chief Says There Is No Military Solution in Syria""", """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for""" """ Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons""" """ will only worsen the violence and misery for millions of people.""", ] snake_case_ = [ """Şeful ONU declară că nu există o soluţie militară în Siria""", """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al""" """ Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi""" """ că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""", ] try: snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=UpperCAmelCase__ , tgt_texts=UpperCAmelCase__ , max_length=3 , max_target_length=1_0 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , ) except NotImplementedError: return self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 1_0 ) # max_target_length will default to max_length if not specified snake_case_ = tokenizer.prepare_seqaseq_batch( UpperCAmelCase__ , tgt_texts=UpperCAmelCase__ , max_length=3 , return_tensors="pt" ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.labels.shape[1] , 3 ) snake_case_ = tokenizer.prepare_seqaseq_batch( src_texts=UpperCAmelCase__ , max_length=3 , max_target_length=1_0 , return_tensors="pt" ) self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3 ) self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3 ) self.assertNotIn("decoder_input_ids" , UpperCAmelCase__ ) @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." ) def lowerCAmelCase__ ( self : List[Any] ) -> Dict: """simple docstring""" pass def lowerCAmelCase__ ( self : Any ) -> Optional[Any]: """simple docstring""" for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = [AddedToken("<special>" , lstrip=UpperCAmelCase__ )] snake_case_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ ) snake_case_ = tokenizer_r.encode("Hey this is a <special> token" ) snake_case_ = tokenizer_r.encode("<special>" , add_special_tokens=UpperCAmelCase__ )[0] self.assertTrue(special_token_id in r_output ) if self.test_slow_tokenizer: snake_case_ = self.rust_tokenizer_class.from_pretrained( UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ , ) snake_case_ = self.tokenizer_class.from_pretrained( UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , **UpperCAmelCase__ ) snake_case_ = tokenizer_p.encode("Hey this is a <special> token" ) snake_case_ = tokenizer_cr.encode("Hey this is a <special> token" ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertTrue(special_token_id in p_output ) self.assertTrue(special_token_id in cr_output ) @require_torch @require_sentencepiece @require_tokenizers class __lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _SCREAMING_SNAKE_CASE = '''facebook/nllb-200-distilled-600M''' _SCREAMING_SNAKE_CASE = [ ''' UN Chief Says There Is No Military Solution in Syria''', ''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''', ] _SCREAMING_SNAKE_CASE = [ '''Şeful ONU declară că nu există o soluţie militară în Siria''', '''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei''' ''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor''' ''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''', ] _SCREAMING_SNAKE_CASE = [ 256047, 16297, 134408, 8165, 248066, 14734, 950, 1135, 105721, 3573, 83, 27352, 108, 49486, 2, ] @classmethod def lowerCAmelCase__ ( cls : List[Any] ) -> List[Any]: """simple docstring""" snake_case_ = NllbTokenizer.from_pretrained( cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn" ) snake_case_ = 1 return cls def lowerCAmelCase__ ( self : Dict ) -> Any: """simple docstring""" self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 2_5_6_0_0_1 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 2_5_6_0_0_2 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 2_5_6_0_5_7 ) def lowerCAmelCase__ ( self : str ) -> str: """simple docstring""" snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : List[str] ) -> List[Any]: """simple docstring""" self.assertIn(UpperCAmelCase__ , self.tokenizer.all_special_ids ) # fmt: off snake_case_ = [RO_CODE, 4_2_5_4, 9_8_0_6_8, 1_1_2_9_2_3, 3_9_0_7_2, 3_9_0_9, 7_1_3, 1_0_2_7_6_7, 2_6, 1_7_3_1_4, 3_5_6_4_2, 1_4_6_8_3, 3_3_1_1_8, 2_0_2_2, 6_6_9_8_7, 2, 2_5_6_0_4_7] # fmt: on snake_case_ = self.tokenizer.decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ ) snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Any ) -> List[str]: """simple docstring""" snake_case_ = ["""this is gunna be a long sentence """ * 2_0] assert isinstance(src_text[0] , UpperCAmelCase__ ) snake_case_ = 1_0 snake_case_ = self.tokenizer(UpperCAmelCase__ , max_length=UpperCAmelCase__ , truncation=UpperCAmelCase__ ).input_ids[0] self.assertEqual(ids[-1] , 2 ) self.assertEqual(ids[0] , UpperCAmelCase__ ) self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ ) def lowerCAmelCase__ ( self : Tuple ) -> Optional[Any]: """simple docstring""" self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [2_5_6_2_0_3, 3] ) def lowerCAmelCase__ ( self : int ) -> int: """simple docstring""" snake_case_ = tempfile.mkdtemp() snake_case_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(UpperCAmelCase__ ) snake_case_ = NllbTokenizer.from_pretrained(UpperCAmelCase__ ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase__ ) @require_torch def lowerCAmelCase__ ( self : List[Any] ) -> Tuple: """simple docstring""" snake_case_ = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , ) snake_case_ = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"] ) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__ ) self.assertEqual((2, 1_5) , batch.input_ids.shape ) self.assertEqual((2, 1_5) , batch.attention_mask.shape ) snake_case_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , UpperCAmelCase__ ) self.assertEqual(UpperCAmelCase__ , batch.decoder_input_ids[0, 0] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) def lowerCAmelCase__ ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" snake_case_ = self.tokenizer(self.src_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=3 , return_tensors="pt" ) snake_case_ = self.tokenizer( text_target=self.tgt_text , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=1_0 , return_tensors="pt" ) snake_case_ = targets["""input_ids"""] snake_case_ = shift_tokens_right( UpperCAmelCase__ , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 ) @require_torch def lowerCAmelCase__ ( self : List[str] ) -> Any: """simple docstring""" snake_case_ = self.tokenizer._build_translation_inputs( "A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( nested_simplify(UpperCAmelCase__ ) , { # A, test, EOS, en_XX "input_ids": [[2_5_6_0_4_7, 7_0, 7_3_5_6, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 2_5_6_0_5_7, } , ) @require_torch def lowerCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case_ = True snake_case_ = self.tokenizer( "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids , [1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2, 2_5_6_0_4_7] ) snake_case_ = False snake_case_ = self.tokenizer( "UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn" ) self.assertEqual( inputs.input_ids , [2_5_6_0_4_7, 1_6_2_9_7, 1_3_4_4_0_8, 2_5_6_5_3, 6_3_7_0, 2_4_8, 2_5_4, 1_0_3_9_2_9, 9_4_9_9_5, 1_0_8, 4_9_4_8_6, 2] )
159
"""simple docstring""" import warnings from ...configuration_utils import PretrainedConfig from ...utils import logging _snake_case = logging.get_logger(__name__) _snake_case = { 'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json', } class UpperCamelCase ( snake_case_ ): UpperCamelCase : int = '''mvp''' UpperCamelCase : Union[str, Any] = ['''past_key_values'''] UpperCamelCase : Any = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , UpperCAmelCase__ : List[str]=50267 , UpperCAmelCase__ : Optional[Any]=1024 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : Optional[Any]=4096 , UpperCAmelCase__ : int=16 , UpperCAmelCase__ : Tuple=12 , UpperCAmelCase__ : int=4096 , UpperCAmelCase__ : List[Any]=16 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Union[str, Any]=1024 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Any=0.0 , UpperCAmelCase__ : Dict=0.0 , UpperCAmelCase__ : Tuple=0.0_2 , UpperCAmelCase__ : Tuple=0.0 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Tuple=1 , UpperCAmelCase__ : Dict=0 , UpperCAmelCase__ : Union[str, Any]=2 , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Tuple=2 , UpperCAmelCase__ : Any=2 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Dict=100 , UpperCAmelCase__ : Union[str, Any]=800 , **UpperCAmelCase__ : Dict , ) -> List[Any]: _a : Any = vocab_size _a : Any = max_position_embeddings _a : Union[str, Any] = d_model _a : List[str] = encoder_ffn_dim _a : List[Any] = encoder_layers _a : Dict = encoder_attention_heads _a : Tuple = decoder_ffn_dim _a : List[Any] = decoder_layers _a : Optional[Any] = decoder_attention_heads _a : Optional[Any] = dropout _a : str = attention_dropout _a : Dict = activation_dropout _a : Any = activation_function _a : Tuple = init_std _a : Dict = encoder_layerdrop _a : Optional[int] = decoder_layerdrop _a : Optional[Any] = classifier_dropout _a : List[Any] = use_cache _a : Dict = encoder_layers _a : str = scale_embedding # scale factor will be sqrt(d_model) if True _a : int = use_prompt _a : Dict = prompt_length _a : Dict = prompt_mid_dim super().__init__( pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ , is_encoder_decoder=UpperCAmelCase__ , decoder_start_token_id=UpperCAmelCase__ , forced_eos_token_id=UpperCAmelCase__ , **UpperCAmelCase__ , ) if self.forced_bos_token_id is None and kwargs.get("""force_bos_token_to_be_generated""" , UpperCAmelCase__ ): _a : List[str] = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ """The config can simply be saved and uploaded again to be fixed.""" )
294
0