# --- accelerate: state checkpointing tests ---

import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Build train/valid loaders over a noisy y = a * x + b regression dataset."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train for `num_epochs`, returning the random value drawn after each epoch."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    """Simple y = a * x + b model with two learnable scalars."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state; with total_limit=1 only one checkpoint may remain
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; with total_limit=2 only the last two survive
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
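The round trip these tests exercise reduces to a few lines. Below is a minimal single-process sketch using the DummyModel defined above; the checkpoint path is arbitrary and the snippet assumes CPU execution so the prepared model is not wrapped:

    import torch
    from accelerate import Accelerator
    from accelerate.utils import set_seed

    set_seed(42)
    model = DummyModel()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    accelerator = Accelerator()
    model, optimizer = accelerator.prepare(model, optimizer)

    accelerator.save_state("/tmp/ckpt")   # writes model, optimizer, and RNG states
    a_before = model.a.item()
    accelerator.load_state("/tmp/ckpt")   # restores everything that was saved
    assert model.a.item() == a_before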
# --- datasets test fixtures: private repos on the Hugging Face Hub CI endpoint ---

import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zipped_txt_file):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zipped_txt_file),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zipped_img_file):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zipped_img_file),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
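A rough sketch of how these fixtures compose inside a test; the load_dataset call and its keyword arguments are illustrative, not part of the fixture file:

    from datasets import load_dataset


    def test_private_text_repo_is_loadable(hf_private_dataset_repo_txt_data, hf_token):
        # The fixture yields a repo_id on the CI hub endpoint patched in by ci_hub_config.
        ds = load_dataset(hf_private_dataset_repo_txt_data, data_files="data/text_data.txt", use_auth_token=hf_token)
        assert "train" in ds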
# --- transformers: backbone_utils tests ---

import unittest

from transformers.utils.backbone_utils import (
    BackboneMixin,
    get_aligned_output_features_output_indices,
    verify_out_features_out_indices,
)


class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
# --- datasets test fixtures: dummy dataset loading script ---

import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
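A small sketch of how the fixture above might be consumed; the test body is illustrative:

    import os


    def test_dataset_loading_script_is_written(dataset_loading_script_dir, dataset_loading_script_name):
        # The fixture writes <tmp_path>/datasets/__dummy_dataset1__/__dummy_dataset1__.py
        # and returns the directory containing it.
        script_path = os.path.join(dataset_loading_script_dir, f"{dataset_loading_script_name}.py")
        assert os.path.isfile(script_path)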
# --- Koch snowflake fractal ---

from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply the iteration step the given number of times to the vector list."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace the middle third of each segment with the two sides of an
    equilateral triangle pointing outward."""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise by the given angle."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
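Two quick sanity checks that follow from the code above: one iteration replaces each of the 3 segments with 4 points (plus the final endpoint), and rotate is a plain 2D rotation:

    # 4 initial points -> 3 segments * 4 points + 1 endpoint = 13 points
    assert len(iterate(INITIAL_VECTORS, 1)) == 13

    # Rotating the unit x-vector by 90 degrees gives the unit y-vector.
    assert numpy.allclose(rotate(numpy.array([1, 0]), 90), numpy.array([0, 1]))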
# --- diffusers: invisible watermark for generated images ---

import numpy as np
import torch
from imwatermark import WatermarkEncoder


# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        # map from [-1, 1] to [0, 255] HWC numpy arrays for the encoder
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        # back to CHW tensors in [-1, 1]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
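A rough usage sketch, assuming the invisible-watermark package that provides imwatermark is installed; the batch shape here is arbitrary:

    watermarker = StableDiffusionXLWatermarker()
    images = torch.rand(1, 3, 512, 512) * 2 - 1  # stand-in for decoder output in [-1, 1]
    marked = watermarker.apply_watermark(images)
    assert marked.shape == images.shape  # same layout, watermark bits embedded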
# --- rail fence (zigzag) cipher ---


def encrypt(input_string: str, key: int) -> str:
    """Place the characters of a string in a grid of `key` rows following a
    zigzag pattern, then read the grid row by row."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Generate a template grid from the key, fill it with the ciphertext row
    by row, then read it back in a zigzag pattern."""
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Decrypt with every possible key and return all candidate plaintexts."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results


if __name__ == "__main__":
    import doctest

    doctest.testmod()
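A quick round trip of the three functions:

    ciphertext = encrypt("HELLO WORLD", 4)
    assert decrypt(ciphertext, 4) == "HELLO WORLD"
    # bruteforce tries every key, so the right plaintext appears under key 4
    assert bruteforce(ciphertext)[4] == "HELLO WORLD"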
# --- diffusers: VQ-Diffusion pipeline ---

from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    """
    Utility class for storing learned text embeddings for classifier-free sampling.
    """

    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)


class VQDiffusionPipeline(DiffusionPipeline):
    """
    Pipeline for text-to-image generation using VQ Diffusion.
    """

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()

        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size

                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        num_inference_steps: int = 100,
        guidance_scale: float = 5.0,
        truncation_rate: float = 1.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)

        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(
                latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t
            ).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """
        Truncate `log_p_x_0` such that for each column vector the total cumulative probability is
        `truncation_rate`: the lowest probabilities that would push the cumulative probability above
        `truncation_rate` are set to zero.
        """
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # undo the sort so the mask lines up with the original class axis
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)

        return rv
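Typical end-user invocation of this pipeline, assuming the published microsoft/vq-diffusion-ithq checkpoint and a CUDA device; a sketch, not part of the module:

    from diffusers import VQDiffusionPipeline

    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
    # truncation_rate < 1.0 zeroes out the unlikely tail of each predicted distribution
    image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]
    image.save("teddy_bear.png")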
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { "uclanlp/visualbert-vqa": "https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json", "uclanlp/visualbert-vqa-pre": "https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json", "uclanlp/visualbert-vqa-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-vcr": "https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json", "uclanlp/visualbert-vcr-pre": "https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json", "uclanlp/visualbert-vcr-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json" ), "uclanlp/visualbert-nlvr2": "https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json", "uclanlp/visualbert-nlvr2-pre": "https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json", "uclanlp/visualbert-nlvr2-coco-pre": ( "https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json" ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class lowerCamelCase ( _lowerCAmelCase ): '''simple docstring''' _A : int = """visual_bert""" def __init__( self: List[Any] , snake_case: Union[str, Any]=30_522 , snake_case: Dict=768 , snake_case: Any=512 , snake_case: Any=12 , snake_case: Any=12 , snake_case: List[Any]=3_072 , snake_case: int="gelu" , snake_case: int=0.1 , snake_case: str=0.1 , snake_case: str=512 , snake_case: Dict=2 , snake_case: int=0.0_2 , snake_case: Optional[int]=1E-12 , snake_case: str=False , snake_case: List[Any]=True , snake_case: Union[str, Any]=1 , snake_case: Optional[Any]=0 , snake_case: Tuple=2 , **snake_case: Union[str, Any] , ) -> Union[str, Any]: super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case ) snake_case_ :Optional[int] = vocab_size snake_case_ :Optional[int] = max_position_embeddings snake_case_ :Union[str, Any] = hidden_size snake_case_ :Optional[int] = visual_embedding_dim snake_case_ :int = num_hidden_layers snake_case_ :Optional[int] = num_attention_heads snake_case_ :Optional[int] = intermediate_size snake_case_ :str = hidden_act snake_case_ :Optional[Any] = hidden_dropout_prob snake_case_ :str = attention_probs_dropout_prob snake_case_ :List[Any] = initializer_range snake_case_ :Optional[Any] = type_vocab_size snake_case_ :Tuple = layer_norm_eps snake_case_ :Optional[Any] = bypass_transformer snake_case_ :List[str] = special_visual_initialize
"""simple docstring""" from __future__ import annotations def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]: print(f"""Vertex\tShortest Distance from vertex {src}""" ) for i, d in enumerate(_SCREAMING_SNAKE_CASE ): print(f"""{i}\t\t{d}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: return True return False def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[float]: snake_case_ = [float("""inf""" )] * vertex_count snake_case_ = 0.0 for _ in range(vertex_count - 1 ): for j in range(_SCREAMING_SNAKE_CASE ): snake_case_ , snake_case_ , snake_case_ = (graph[j][k] for k in ["""src""", """dst""", """weight"""]) if distance[u] != float("""inf""" ) and distance[u] + w < distance[v]: snake_case_ = distance[u] + w snake_case_ = check_negative_cycle(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if negative_cycle_exists: raise Exception("""Negative cycle found""" ) return distance if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : int = int(input('Enter number of vertices: ').strip()) __SCREAMING_SNAKE_CASE : Dict = int(input('Enter number of edges: ').strip()) __SCREAMING_SNAKE_CASE : list[dict[str, int]] = [{} for _ in range(E)] for i in range(E): print('Edge ', i + 1) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = ( int(x) for x in input('Enter source, destination, weight: ').strip().split(' ') ) __SCREAMING_SNAKE_CASE : Union[str, Any] = {'src': src, 'dst': dest, 'weight': weight} __SCREAMING_SNAKE_CASE : Union[str, Any] = int(input('\nEnter shortest path source:').strip()) __SCREAMING_SNAKE_CASE : str = bellman_ford(graph, V, E, source) print_distance(shortest_distance, 0)
"""simple docstring""" import argparse from collections import defaultdict def _A ( lowerCAmelCase_ : Dict , lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): """simple docstring""" lowerCAmelCase__ = F'{file}_{class_name}_{test_name}' done_test[_id] += 1 with open(lowerCAmelCase_ , "r" ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = F'class {class_name}(' lowerCAmelCase__ = F'{4 * " "}def {test_name}(' lowerCAmelCase__ = F'{8 * " "}{correct_line.split()[0]}' lowerCAmelCase__ = F'{16 * " "}{correct_line.split()[0]}' lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = False lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = [] for line in lines: if line.startswith(lowerCAmelCase_ ): lowerCAmelCase__ = True elif in_class and line.startswith(lowerCAmelCase_ ): lowerCAmelCase__ = True elif in_class and in_func and (line.startswith(lowerCAmelCase_ ) or line.startswith(lowerCAmelCase_ )): lowerCAmelCase__ = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: lowerCAmelCase__ = True if in_class and in_func and in_line: if ")" not in line: continue else: lowerCAmelCase__ = True if in_class and in_func and in_line and insert_line: new_lines.append(F'{spaces * " "}{correct_line}' ) lowerCAmelCase__ = lowerCAmelCase__ = lowerCAmelCase__ = lowerCAmelCase__ = False else: new_lines.append(lowerCAmelCase_ ) with open(lowerCAmelCase_ , "w" ) as f: for line in new_lines: f.write(lowerCAmelCase_ ) def _A ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Any=None ): """simple docstring""" if fail is not None: with open(lowerCAmelCase_ , "r" ) as f: lowerCAmelCase__ = {l.strip() for l in f.readlines()} else: lowerCAmelCase__ = None with open(lowerCAmelCase_ , "r" ) as f: lowerCAmelCase__ = f.readlines() lowerCAmelCase__ = defaultdict(lowerCAmelCase_ ) for line in correct_lines: lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = line.split(";" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument('--correct_filename', help='filename of tests with expected result') parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None) UpperCamelCase = parser.parse_args() main(args.correct_filename, args.fail_filename)
"""simple docstring""" import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from elia_utils import ( embed_questions_for_retrieval, make_qa_sas_model, qa_sas_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer __A = '''bart''' __A = True @st.cache(allow_output_mutation=_lowerCamelCase ) def lowercase_ ( ) -> int: '''simple docstring''' if LOAD_DENSE_INDEX: __lowerCamelCase : List[Any] = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" ) __lowerCamelCase : Tuple = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" ) __lowerCamelCase : Optional[Any] = qar_model.eval() else: __lowerCamelCase , __lowerCamelCase : Tuple = (None, None) if MODEL_TYPE == "bart": __lowerCamelCase : Any = AutoTokenizer.from_pretrained("yjernite/bart_eli5" ) __lowerCamelCase : int = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" ) __lowerCamelCase : Dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" ) sas_model.load_state_dict(save_dict["model"] ) __lowerCamelCase : Dict = sas_model.eval() else: __lowerCamelCase , __lowerCamelCase : Any = make_qa_sas_model( model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" ) return (qar_tokenizer, qar_model, sas_tokenizer, sas_model) @st.cache(allow_output_mutation=_lowerCamelCase ) def lowercase_ ( ) -> Union[str, Any]: '''simple docstring''' if LOAD_DENSE_INDEX: __lowerCamelCase : List[Any] = faiss.StandardGpuResources() __lowerCamelCase : Optional[int] = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"] __lowerCamelCase : Tuple = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , ) __lowerCamelCase : Optional[int] = faiss.IndexFlatIP(128 ) __lowerCamelCase : Union[str, Any] = faiss.index_cpu_to_gpu(_lowerCamelCase , 1 , _lowerCamelCase ) wikiaab_gpu_index_flat.add(_lowerCamelCase ) # TODO fix for larger GPU else: __lowerCamelCase , __lowerCamelCase : List[Any] = (None, None) __lowerCamelCase : Optional[int] = Elasticsearch([{"host": "localhost", "port": "9200"}] ) return (wikiaab_passages, wikiaab_gpu_index_flat, es_client) @st.cache(allow_output_mutation=_lowerCamelCase ) def lowercase_ ( ) -> List[str]: '''simple docstring''' __lowerCamelCase : int = datasets.load_dataset("eli5" , name="LFQA_reddit" ) __lowerCamelCase : List[Any] = elia["train_eli5"] __lowerCamelCase : Union[str, Any] = np.memmap( "eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) ) __lowerCamelCase : Union[str, Any] = faiss.IndexFlatIP(128 ) eli5_train_q_index.add(_lowerCamelCase ) return (elia_train, eli5_train_q_index) __A, __A, __A = load_indexes() __A, __A, __A, __A = load_models() __A, __A = load_train_data() def lowercase_ ( _lowerCamelCase: Optional[Any] , _lowerCamelCase: Optional[int]=10 ) -> Tuple: '''simple docstring''' __lowerCamelCase : Tuple = embed_questions_for_retrieval([question] , _lowerCamelCase , _lowerCamelCase ) __lowerCamelCase , __lowerCamelCase : Any = eli5_train_q_index.search(_lowerCamelCase , _lowerCamelCase ) __lowerCamelCase : List[Any] = [elia_train[int(_lowerCamelCase )] for i in I[0]] return nn_examples def lowercase_ ( _lowerCamelCase: List[Any] , _lowerCamelCase: str="wiki40b" , _lowerCamelCase: Any="dense" , _lowerCamelCase: Optional[Any]=10 ) -> 
Union[str, Any]: '''simple docstring''' if source == "none": __lowerCamelCase , __lowerCamelCase : List[str] = (" <P> ".join(["" for _ in range(11 )] ).strip(), []) else: if method == "dense": __lowerCamelCase , __lowerCamelCase : Optional[Any] = query_qa_dense_index( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) else: __lowerCamelCase , __lowerCamelCase : Tuple = query_es_index( _lowerCamelCase , _lowerCamelCase , index_name="english_wiki40b_snippets_100w" , n_results=_lowerCamelCase , ) __lowerCamelCase : Tuple = [ (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst ] __lowerCamelCase : Optional[Any] = "question: {} context: {}".format(_lowerCamelCase , _lowerCamelCase ) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _lowerCamelCase : None), transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _lowerCamelCase : None), } ) def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: Any , _lowerCamelCase: List[str] , _lowerCamelCase: Any=64 , _lowerCamelCase: str=256 , _lowerCamelCase: str=False , _lowerCamelCase: str=2 , _lowerCamelCase: str=0.95 , _lowerCamelCase: List[Any]=0.8 ) -> Tuple: '''simple docstring''' with torch.no_grad(): __lowerCamelCase : Optional[int] = qa_sas_generate( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , num_answers=1 , num_beams=_lowerCamelCase , min_len=_lowerCamelCase , max_len=_lowerCamelCase , do_sample=_lowerCamelCase , temp=_lowerCamelCase , top_p=_lowerCamelCase , top_k=_lowerCamelCase , max_input_length=1024 , device="cuda:0" , )[0] return (answer, support_list) st.title('''Long Form Question Answering with ELI5''') # Start sidebar __A = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>''' __A = ''' <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> ''' % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia __A = ''' This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. ''' st.sidebar.markdown(description, unsafe_allow_html=True) __A = [ '''Answer the question''', '''View the retrieved document only''', '''View the most similar ELI5 question and answer''', '''Show me everything, please!''', ] __A = st.sidebar.checkbox('''Demo options''') if demo_options: __A = st.sidebar.selectbox( '''''', action_list, index=3, ) __A = action_list.index(action_st) __A = st.sidebar.selectbox( '''''', ['''Show full text of passages''', '''Show passage section titles'''], index=0, ) __A = show_type == '''Show full text of passages''' else: __A = 3 __A = True __A = st.sidebar.checkbox('''Retrieval options''') if retrieval_options: __A = ''' ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. 
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. ''' st.sidebar.markdown(retriever_info) __A = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none''']) __A = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed''']) else: __A = '''wiki40b''' __A = '''dense''' __A = '''beam''' __A = 2 __A = 64 __A = 256 __A = None __A = None __A = st.sidebar.checkbox('''Generation options''') if generate_options: __A = ''' ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder\'s output probabilities. ''' st.sidebar.markdown(generate_info) __A = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled''']) __A = st.sidebar.slider( '''Minimum generation length''', min_value=8, max_value=256, value=64, step=8, format=None, key=None ) __A = st.sidebar.slider( '''Maximum generation length''', min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": __A = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: __A = st.sidebar.slider( '''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) __A = st.sidebar.slider( '''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) __A = None # start main text __A = [ '''<MY QUESTION>''', '''How do people make chocolate?''', '''Why do we get a fever when we are sick?''', '''How can different animals perceive different colors?''', '''What is natural language processing?''', '''What\'s the best way to treat a sunburn?''', '''What exactly are vitamins ?''', '''How does nuclear energy provide electricity?''', '''What\'s the difference between viruses and bacteria?''', '''Why are flutes classified as woodwinds when most of them are made out of metal ?''', '''Why do people like drinking coffee even though it tastes so bad?''', '''What happens when wine ages? How does it make the wine taste better?''', '''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''', '''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''', '''How does New Zealand have so many large bird predators?''', ] __A = st.selectbox( '''What would you like to ask? 
---- select <MY QUESTION> to enter a new query''', questions_list, index=1, ) if question_s == "<MY QUESTION>": __A = st.text_input('''Enter your question here:''', '''''') else: __A = question_s if st.button('''Show me!'''): if action in [0, 1, 3]: if index_type == "mixed": __A, __A = make_support(question, source=wiki_source, method='''dense''', n_results=10) __A, __A = make_support(question, source=wiki_source, method='''sparse''', n_results=10) __A = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] __A = support_list[:10] __A = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list]) else: __A, __A = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: __A, __A = answer_question( question_doc, sas_model, sas_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == '''sampled'''), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown('''### The model generated answer is:''') st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''') for i, res in enumerate(support_list): __A = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_''')) __A = res[1].strip() if sec_titles == "": __A = '''[{}]({})'''.format(res[0], wiki_url) else: __A = sec_titles.split(''' & ''') __A = ''' & '''.join( ['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list] ) st.markdown( '''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True ) if action in [2, 3]: __A = find_nearest_training(question) __A = nn_train_list[0] st.markdown( '''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title''']) ) __A = [ '''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != ''''''])) for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score'''])) if i == 0 or sc > 2 ] st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st))) __A = ''' --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* ''' st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
135
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''', '''umberto-commoncrawl-cased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json''' ), '''umberto-wikipedia-uncased-v1''': ( '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json''' ), } class _snake_case ( a__ ): snake_case__ = "camembert" def __init__( self : Union[str, Any] , UpperCAmelCase : List[Any]=30522 , UpperCAmelCase : Optional[int]=768 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Tuple=12 , UpperCAmelCase : Tuple=3072 , UpperCAmelCase : int="gelu" , UpperCAmelCase : Optional[int]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Tuple=512 , UpperCAmelCase : Tuple=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Tuple=1E-12 , UpperCAmelCase : Union[str, Any]=1 , UpperCAmelCase : int=0 , UpperCAmelCase : int=2 , UpperCAmelCase : str="absolute" , UpperCAmelCase : Dict=True , UpperCAmelCase : int=None , **UpperCAmelCase : List[str] , ): super().__init__(pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , **UpperCAmelCase ) __lowerCamelCase : Any = vocab_size __lowerCamelCase : Optional[int] = hidden_size __lowerCamelCase : int = num_hidden_layers __lowerCamelCase : int = num_attention_heads __lowerCamelCase : int = hidden_act __lowerCamelCase : Union[str, Any] = intermediate_size __lowerCamelCase : Optional[int] = hidden_dropout_prob __lowerCamelCase : List[Any] = attention_probs_dropout_prob __lowerCamelCase : Dict = max_position_embeddings __lowerCamelCase : Tuple = type_vocab_size __lowerCamelCase : Any = initializer_range __lowerCamelCase : str = layer_norm_eps __lowerCamelCase : List[Any] = position_embedding_type __lowerCamelCase : Dict = use_cache __lowerCamelCase : List[Any] = classifier_dropout class _snake_case ( a__ ): @property def lowerCamelCase__ ( self : int ): if self.task == "multiple-choice": __lowerCamelCase : List[str] = {0: "batch", 1: "choice", 2: "sequence"} else: __lowerCamelCase : Tuple = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
135
1
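# The dense retrieval path in the ELI5 demo above boils down to a
# max-inner-product lookup over precomputed 128-d passage embeddings. A
# minimal sketch of that step with faiss; the random arrays stand in for real
# RetriBERT question and passage embeddings, so `dim`, `passage_reps`, and
# `question_rep` are illustrative names, not objects from the app.
import faiss
import numpy as np

dim = 128
passage_reps = np.random.rand(1000, dim).astype("float32")  # stand-in passage embeddings

index = faiss.IndexFlatIP(dim)  # exact inner-product search, same index type as the app
index.add(passage_reps)

question_rep = np.random.rand(1, dim).astype("float32")  # stand-in question embedding
scores, ids = index.search(question_rep, 10)  # top-10 most similar passages
print(ids[0], scores[0])
# The app moves this index to GPU (faiss.index_cpu_to_gpu) purely for speed;
# the search semantics are identical on CPU.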
"""simple docstring""" from math import factorial _a = {str(digit): factorial(digit) for digit in range(10)} def __a ( __lowerCamelCase ): if not isinstance(__lowerCamelCase, __lowerCamelCase ): raise TypeError("Parameter number must be int" ) if number < 0: raise ValueError("Parameter number must be greater than or equal to 0" ) # Converts number in string to iterate on its digits and adds its factorial. return sum(DIGIT_FACTORIAL[digit] for digit in str(__lowerCamelCase ) ) def __a ( __lowerCamelCase = 60, __lowerCamelCase = 100_0000 ): if not isinstance(__lowerCamelCase, __lowerCamelCase ) or not isinstance(__lowerCamelCase, __lowerCamelCase ): raise TypeError("Parameters chain_length and number_limit must be int" ) if chain_length <= 0 or number_limit <= 0: raise ValueError( "Parameters chain_length and number_limit must be greater than 0" ) # the counter for the chains with the exact desired length UpperCAmelCase_ : str = 0 # the cached sizes of the previous chains UpperCAmelCase_ : dict[int, int] = {} for start_chain_element in range(1, __lowerCamelCase ): # The temporary set will contain the elements of the chain UpperCAmelCase_ : Optional[Any] = set() UpperCAmelCase_ : Optional[int] = 0 # Stop computing the chain when you find a cached size, a repeating item or the # length is greater then the desired one. UpperCAmelCase_ : Tuple = start_chain_element while ( chain_element not in chain_sets_lengths and chain_element not in chain_set and chain_set_length <= chain_length ): chain_set.add(__lowerCamelCase ) chain_set_length += 1 UpperCAmelCase_ : str = digit_factorial_sum(__lowerCamelCase ) if chain_element in chain_sets_lengths: chain_set_length += chain_sets_lengths[chain_element] UpperCAmelCase_ : str = chain_set_length # If chain contains the exact amount of elements increase the counter if chain_set_length == chain_length: chains_counter += 1 return chains_counter if __name__ == "__main__": import doctest doctest.testmod() print(f"""{solution()}""")
23
"""simple docstring""" import datasets _a = '\\n@InProceedings{conneau2018xnli,\n author = "Conneau, Alexis\n and Rinott, Ruty\n and Lample, Guillaume\n and Williams, Adina\n and Bowman, Samuel R.\n and Schwenk, Holger\n and Stoyanov, Veselin",\n title = "XNLI: Evaluating Cross-lingual Sentence Representations",\n booktitle = "Proceedings of the 2018 Conference on Empirical Methods\n in Natural Language Processing",\n year = "2018",\n publisher = "Association for Computational Linguistics",\n location = "Brussels, Belgium",\n}\n' _a = '\\nXNLI is a subset of a few thousand examples from MNLI which has been translated\ninto a 14 different languages (some low-ish resource). As with MNLI, the goal is\nto predict textual entailment (does sentence A imply/contradict/neither sentence\nB) and is a classification task (given two sentences, predict one of three\nlabels).\n' _a = '\nComputes XNLI score which is just simple accuracy.\nArgs:\n predictions: Predicted labels.\n references: Ground truth labels.\nReturns:\n \'accuracy\': accuracy\nExamples:\n\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> xnli_metric = datasets.load_metric("xnli")\n >>> results = xnli_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n' def __a ( __lowerCamelCase, __lowerCamelCase ): return (preds == labels).mean() @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class A_ (datasets.Metric ): '''simple docstring''' def UpperCamelCase__ ( self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32" ), } ) , codebase_urls=[] , reference_urls=[] , format="numpy" , ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ ): """simple docstring""" return {"accuracy": simple_accuracy(lowercase_ , lowercase_ )}
23
1
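# The chain logic in the Project Euler solution above is easiest to see on the
# problem's own worked example: 69 yields five non-repeating terms before the
# sequence cycles. A self-contained walk of a single chain (the helper mirrors
# digit_factorial_sum above):
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


chain = []
element = 69
while element not in chain:  # stop at the first repeated term
    chain.append(element)
    element = digit_factorial_sum(element)
print(chain)  # [69, 363600, 1454, 169, 363601] -> chain length 5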
import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation def UpperCamelCase( __UpperCamelCase : Union[str, Any] ): lowerCAmelCase_ : Any = 384 if "tiny" in model_name: lowerCAmelCase_ : Tuple = [3, 3, 9, 3] lowerCAmelCase_ : List[str] = [96, 192, 384, 768] if "small" in model_name: lowerCAmelCase_ : List[Any] = [3, 3, 27, 3] lowerCAmelCase_ : List[str] = [96, 192, 384, 768] if "base" in model_name: lowerCAmelCase_ : Optional[int] = [3, 3, 27, 3] lowerCAmelCase_ : List[str] = [128, 256, 512, 1024] lowerCAmelCase_ : int = 512 if "large" in model_name: lowerCAmelCase_ : List[str] = [3, 3, 27, 3] lowerCAmelCase_ : int = [192, 384, 768, 1536] lowerCAmelCase_ : List[Any] = 768 if "xlarge" in model_name: lowerCAmelCase_ : Optional[Any] = [3, 3, 27, 3] lowerCAmelCase_ : Optional[int] = [256, 512, 1024, 2048] lowerCAmelCase_ : Optional[Any] = 1024 # set label information lowerCAmelCase_ : Tuple = 150 lowerCAmelCase_ : Optional[int] = '''huggingface/label-files''' lowerCAmelCase_ : str = '''ade20k-id2label.json''' lowerCAmelCase_ : List[str] = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='''dataset''' ) ,'''r''' ) ) lowerCAmelCase_ : Any = {int(__UpperCamelCase ): v for k, v in idalabel.items()} lowerCAmelCase_ : Optional[int] = {v: k for k, v in idalabel.items()} lowerCAmelCase_ : Dict = ConvNextConfig( depths=__UpperCamelCase ,hidden_sizes=__UpperCamelCase ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) lowerCAmelCase_ : List[Any] = UperNetConfig( backbone_config=__UpperCamelCase ,auxiliary_in_channels=__UpperCamelCase ,num_labels=__UpperCamelCase ,idalabel=__UpperCamelCase ,labelaid=__UpperCamelCase ,) return config def UpperCamelCase( __UpperCamelCase : List[Any] ): lowerCAmelCase_ : List[str] = [] # fmt: off # stem rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') ) rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') ) rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((f"""backbone.stages.{i}.{j}.gamma""", f"""backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.depthwise_conv.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.dwconv.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.norm.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.layernorm.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.weight""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv1.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.weight""", 
f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight""") ) rename_keys.append((f"""backbone.stages.{i}.{j}.pointwise_conv2.bias""", f"""backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias""") ) if i > 0: rename_keys.append((f"""backbone.downsample_layers.{i}.0.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.0.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.0.bias""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.1.weight""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.weight""") ) rename_keys.append((f"""backbone.downsample_layers.{i}.1.bias""", f"""backbone.encoder.stages.{i}.downsampling_layer.1.bias""") ) rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") ) rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") ) # decode head rename_keys.extend( [ ('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''), ('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''), ('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''), ('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''), ] ) # fmt: on return rename_keys def UpperCamelCase( __UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ,__UpperCamelCase : Tuple ): lowerCAmelCase_ : Any = dct.pop(__UpperCamelCase ) lowerCAmelCase_ : Tuple = val def UpperCamelCase( __UpperCamelCase : Optional[int] ,__UpperCamelCase : int ,__UpperCamelCase : Dict ): lowerCAmelCase_ : List[Any] = { '''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''', '''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''', '''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''', '''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''', '''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''', } lowerCAmelCase_ : str = model_name_to_url[model_name] lowerCAmelCase_ : str = torch.hub.load_state_dict_from_url(__UpperCamelCase ,map_location='''cpu''' )['''state_dict'''] lowerCAmelCase_ : Optional[int] = get_upernet_config(__UpperCamelCase ) lowerCAmelCase_ : Any = UperNetForSemanticSegmentation(__UpperCamelCase ) model.eval() # replace "bn" => "batch_norm" for key in state_dict.copy().keys(): lowerCAmelCase_ : Dict = state_dict.pop(__UpperCamelCase ) if "bn" in key: lowerCAmelCase_ : List[str] = key.replace('''bn''' ,'''batch_norm''' ) lowerCAmelCase_ : Tuple = val # rename keys lowerCAmelCase_ : str = create_rename_keys(__UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # verify on 
image lowerCAmelCase_ : int = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg''' lowerCAmelCase_ : Tuple = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw ).convert('''RGB''' ) lowerCAmelCase_ : Dict = SegformerImageProcessor() lowerCAmelCase_ : Any = processor(__UpperCamelCase ,return_tensors='''pt''' ).pixel_values with torch.no_grad(): lowerCAmelCase_ : str = model(__UpperCamelCase ) if model_name == "upernet-convnext-tiny": lowerCAmelCase_ : List[str] = torch.tensor( [[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ) elif model_name == "upernet-convnext-small": lowerCAmelCase_ : Union[str, Any] = torch.tensor( [[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] ) elif model_name == "upernet-convnext-base": lowerCAmelCase_ : Dict = torch.tensor( [[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] ) elif model_name == "upernet-convnext-large": lowerCAmelCase_ : Optional[Any] = torch.tensor( [[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] ) elif model_name == "upernet-convnext-xlarge": lowerCAmelCase_ : Dict = torch.tensor( [[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] ) print('''Logits:''' ,outputs.logits[0, 0, :3, :3] ) assert torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCamelCase ) print(f"""Saving processor to {pytorch_dump_folder_path}""" ) processor.save_pretrained(__UpperCamelCase ) if push_to_hub: print(f"""Pushing model and processor for {model_name} to hub""" ) model.push_to_hub(f"""openmmlab/{model_name}""" ) processor.push_to_hub(f"""openmmlab/{model_name}""" ) if __name__ == "__main__": A__ : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''upernet-convnext-tiny''', type=str, choices=[F'''upernet-convnext-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']], help='''Name of the ConvNext UperNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) A__ : int = parser.parse_args() convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
103
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class A__(a_ ): """simple docstring""" def __init__( self , *_lowercase , _lowercase=None , _lowercase=None , **_lowercase ) -> Optional[Any]: super().__init__(*_lowercase , **_lowercase ) a_ : Optional[int] = eval_examples a_ : Tuple = post_process_function def UpperCamelCase__ ( self , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase = "eval" ) -> Union[str, Any]: a_ : List[str] = self.eval_dataset if eval_dataset is None else eval_dataset a_ : List[str] = self.get_eval_dataloader(_lowercase ) a_ : List[Any] = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. a_ : Optional[int] = self.compute_metrics a_ : List[str] = None a_ : Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop a_ : Any = time.time() try: a_ : Union[str, Any] = eval_loop( _lowercase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , ) finally: a_ : Dict = compute_metrics a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default a_ : List[Any] = self.post_process_function(_lowercase , _lowercase , output.predictions ) a_ : Optional[Any] = self.compute_metrics(_lowercase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): a_ : List[str] = metrics.pop(_lowercase ) metrics.update(output.metrics ) else: a_ : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_lowercase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) a_ : List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , _lowercase ) return metrics def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase=None , _lowercase = "test" ) -> str: a_ : Tuple = self.get_test_dataloader(_lowercase ) # Temporarily disable metric computation, we will do it in the loop here. 
a_ : List[Any] = self.compute_metrics a_ : int = None a_ : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop a_ : Union[str, Any] = time.time() try: a_ : List[str] = eval_loop( _lowercase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_lowercase , metric_key_prefix=_lowercase , ) finally: a_ : Optional[Any] = compute_metrics a_ : Union[str, Any] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _lowercase , _lowercase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output a_ : Optional[int] = self.post_process_function(_lowercase , _lowercase , output.predictions , """predict""" ) a_ : List[Any] = self.compute_metrics(_lowercase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): a_ : int = metrics.pop(_lowercase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_lowercase )
248
0
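# Round-trip check for the conversion script above: once a checkpoint has been
# pushed, it can be reloaded straight from the hub. The repo id below assumes
# the script's own push_to_hub branch ran for the tiny variant.
import requests
import torch
from PIL import Image
from transformers import SegformerImageProcessor, UperNetForSemanticSegmentation

url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

processor = SegformerImageProcessor()
model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch, 150 ADE20k classes, height, width)
print(logits.shape)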
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __SCREAMING_SNAKE_CASE ="base_with_context" def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) lowercase_ : Optional[Any] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): lowercase_ : Dict = weights[F'''layers_{lyr_num}'''] lowercase_ : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) lowercase_ : Any = ly_weight['attention'] lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : List[Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ): lowercase_ : Dict = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): lowercase_ : Any = weights[F'''layers_{lyr_num}'''] lowercase_ : List[str] = ly_weight['attention'] lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : Dict = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) lowercase_ : Optional[Any] = 
nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) lowercase_ : str = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) lowercase_ : str = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): lowercase_ : Optional[int] = weights[F'''layers_{lyr_num}'''] lowercase_ : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) lowercase_ : int = ly_weight['self_attention'] lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : List[str] = ly_weight['MultiHeadDotProductAttention_0'] lowercase_ : int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : int = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : str = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : int = checkpoints.load_tax_checkpoint(args.checkpoint_path ) lowercase_ : Union[str, Any] = jnp.tree_util.tree_map(onp.array , __SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] lowercase_ : str = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) lowercase_ : Union[str, Any] = inference.parse_training_gin_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = inference.InferenceModel(args.checkpoint_path , __SCREAMING_SNAKE_CASE ) lowercase_ : str = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) lowercase_ : str = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) lowercase_ : Optional[Any] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) lowercase_ : List[str] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) lowercase_ : Dict = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = load_decoder(ta_checkpoint['target']['decoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) lowercase_ : Dict = SpectrogramDiffusionPipeline( notes_encoder=__SCREAMING_SNAKE_CASE , continuous_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , melgan=__SCREAMING_SNAKE_CASE , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F"{MODEL}/checkpoint_500000", type=str, required=False, help="Path to the original jax model checkpoint.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() main(args)
363
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
321
0
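# For orientation, end-user usage of the processor exercised by the tests
# above; the checkpoint id is the public CLIPSeg release on the hub, not
# something defined in the test file itself.
import requests
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# One text prompt per image: the processor tokenizes the prompts and
# preprocesses the images in a single call.
inputs = processor(text=["a cat", "a remote"], images=[image, image], padding=True, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']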
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        """simple docstring"""
        return 1e-4
253
import os
from typing import Dict, List, Tuple, TypeVar, Union

T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
253
1
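# What the ONNX config above actually declares, assuming the classes are
# importable as in stock transformers: BEiT exports a single pixel_values
# input with fully dynamic axes.
from transformers import BeitConfig
from transformers.models.beit.configuration_beit import BeitOnnxConfig

config = BeitConfig()  # defaults match the signature above (224px images, 16px patches)
onnx_config = BeitOnnxConfig(config)
print(onnx_config.inputs)
# OrderedDict([('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'})])
print(onnx_config.atol_for_validation)  # 0.0001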
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available snake_case__ : List[Any] = { '''configuration_time_series_transformer''': [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimeSeriesTransformerConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case__ : Optional[Any] = [ '''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TimeSeriesTransformerForPrediction''', '''TimeSeriesTransformerModel''', '''TimeSeriesTransformerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys snake_case__ : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
356
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow snake_case__ : Optional[Any] = False class snake_case_( unittest.TestCase ): def lowerCamelCase__ ( self : List[str] , UpperCamelCase_ : List[Any]=3_2 ): set_seed(0 ) lowerCAmelCase : Tuple = UNetaDModel(sample_size=UpperCamelCase_ , in_channels=3 , out_channels=3 ) lowerCAmelCase : List[str] = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def lowerCamelCase__ ( self : Dict ): lowerCAmelCase : List[str] = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable lowerCAmelCase : str = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) lowerCAmelCase : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=UpperCamelCase_ , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) lowerCAmelCase : int = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2) ).to(UpperCamelCase_ ) for _ in range(4 )] lowerCAmelCase : Optional[int] = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(UpperCamelCase_ ) for _ in range(4 )] # train with a DDPM scheduler lowerCAmelCase, lowerCAmelCase : str = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : List[Any] = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : List[str] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM lowerCAmelCase, lowerCAmelCase : List[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(UpperCamelCase_ ) for i in range(4 ): optimizer.zero_grad() lowerCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) lowerCAmelCase : Optional[int] = model(UpperCamelCase_ , timesteps[i] ).sample lowerCAmelCase : int = torch.nn.functional.mse_loss(UpperCamelCase_ , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) ) self.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-5 ) )
314
0
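# The training test above rests on one premise: with identical beta schedules,
# DDPMScheduler and DDIMScheduler add identical forward noise, so both loops
# see the same inputs. That premise can be verified in isolation:
import torch
from diffusers import DDIMScheduler, DDPMScheduler

kwargs = dict(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", clip_sample=True)
ddpm_scheduler = DDPMScheduler(**kwargs)
ddim_scheduler = DDIMScheduler(**kwargs)

sample = torch.randn(4, 3, 32, 32)
noise = torch.randn(4, 3, 32, 32)
timesteps = torch.randint(0, 1000, (4,)).long()

assert torch.allclose(
    ddpm_scheduler.add_noise(sample, noise, timesteps),
    ddim_scheduler.add_noise(sample, noise, timesteps),
    atol=1e-5,
)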
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _lowerCAmelCase :Dict = logging.get_logger(__name__) _lowerCAmelCase :Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} _lowerCAmelCase :Any = { 'vocab_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json', }, 'merges_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt', }, 'tokenizer_file': { 'Salesforce/codegen-350M-mono': ( 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json' ), }, } _lowerCAmelCase :Union[str, Any] = { 'Salesforce/codegen-350M-mono': 2_048, } class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =['''input_ids''', '''attention_mask'''] a__ =CodeGenTokenizer def __init__( self , A=None , A=None , A=None , A="<|endoftext|>" , A="<|endoftext|>" , A="<|endoftext|>" , A=False , **A , ) -> Optional[int]: super().__init__( A , A , tokenizer_file=A , unk_token=A , bos_token=A , eos_token=A , add_prefix_space=A , **A , ) if kwargs.pop('''add_bos_token''' , A ): _UpperCAmelCase : str = kwargs.pop('''name_or_path''' , '''''' ) raise ValueError( '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.''' '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n''' f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n' f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n' '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.''' ''' so that the fast tokenizer works correctly.''' ) _UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , A ) != add_prefix_space: _UpperCAmelCase : Optional[Any] = getattr(A , pre_tok_state.pop('''type''' ) ) _UpperCAmelCase : Any = add_prefix_space _UpperCAmelCase : Any = pre_tok_class(**A ) _UpperCAmelCase : Any = add_prefix_space def __lowerCAmelCase ( self , *A , **A ) -> BatchEncoding: _UpperCAmelCase : Optional[Any] = kwargs.get('''is_split_into_words''' , A ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A , **A ) def __lowerCAmelCase ( self , *A , **A ) -> BatchEncoding: _UpperCAmelCase : Optional[int] = kwargs.get('''is_split_into_words''' , A ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*A , **A ) def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: _UpperCAmelCase : Optional[int] = self._tokenizer.model.save(A , name=A ) return tuple(A ) def __lowerCAmelCase ( self , A , A = False , A = None , A = None , **A , ) -> str: _UpperCAmelCase : Dict = super().decode( token_ids=A , skip_special_tokens=A , clean_up_tokenization_spaces=A , **A , ) if truncate_before_pattern is not None and len(A ) > 0: _UpperCAmelCase : Tuple = self.truncate(A , A ) return decoded_text def __lowerCAmelCase ( self , A , A ) -> Optional[Any]: def find_re(A , A , A ): _UpperCAmelCase : Any = pattern.search(A , A ) return m.start() if m else -1 _UpperCAmelCase : Any = [re.compile(A , re.MULTILINE ) for pattern in truncate_before_pattern] _UpperCAmelCase : str = list(re.finditer('''^print''' , A , re.MULTILINE ) ) if len(A ) > 1: _UpperCAmelCase : Tuple = completion[: prints[1].start()] _UpperCAmelCase : List[Any] = list(re.finditer('''^def''' , A , re.MULTILINE ) ) if len(A ) > 1: _UpperCAmelCase : Optional[int] = completion[: defs[1].start()] _UpperCAmelCase : Optional[Any] = 0 _UpperCAmelCase : Optional[Any] = [ pos for pos in [find_re(A , A , A ) for terminal in terminals] if pos != -1 ] if len(A ) > 0: return completion[: min(A )] else: return completion
263
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase_ (UpperCamelCase__ : float ): if num <= 0: raise ValueError('''math domain error''' ) return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0] def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
263
1
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class SCREAMING_SNAKE_CASE_ ( __a ): """simple docstring""" def __init__( self , lowerCAmelCase__ = "▁" , lowerCAmelCase__ = True , lowerCAmelCase__ = "<unk>" , lowerCAmelCase__ = "</s>" , lowerCAmelCase__ = "<pad>" , ): __SCREAMING_SNAKE_CASE = { """pad""": {"""id""": 0, """token""": pad_token}, """eos""": {"""id""": 1, """token""": eos_token}, """unk""": {"""id""": 2, """token""": unk_token}, } __SCREAMING_SNAKE_CASE = [None] * len(self.special_tokens) for token_dict in self.special_tokens.values(): __SCREAMING_SNAKE_CASE = token_dict["""token"""] __SCREAMING_SNAKE_CASE = Tokenizer(Unigram()) __SCREAMING_SNAKE_CASE = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(""" {2,}""") , """ """), normalizers.Lowercase(), ]) __SCREAMING_SNAKE_CASE = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__), pre_tokenizers.Digits(individual_digits=lowerCAmelCase__), pre_tokenizers.Punctuation(), ]) __SCREAMING_SNAKE_CASE = decoders.Metaspace(replacement=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = TemplateProcessing( single=f"$A {self.special_tokens['eos']['token']}" , special_tokens=[(self.special_tokens["""eos"""]["""token"""], self.special_tokens["""eos"""]["""id"""])] , ) __SCREAMING_SNAKE_CASE = { """model""": """SentencePieceUnigram""", """replacement""": replacement, """add_prefix_space""": add_prefix_space, } super().__init__(lowerCAmelCase__ , lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 8_0_0_0 , lowerCAmelCase__ = True , ): __SCREAMING_SNAKE_CASE = trainers.UnigramTrainer( vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = [files] self._tokenizer.train(lowerCAmelCase__ , trainer=lowerCAmelCase__) self.add_unk_id() def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ = 8_0_0_0 , lowerCAmelCase__ = True , ): __SCREAMING_SNAKE_CASE = trainers.UnigramTrainer( vocab_size=lowerCAmelCase__ , special_tokens=self.special_tokens_list , show_progress=lowerCAmelCase__ , ) self._tokenizer.train_from_iterator(lowerCAmelCase__ , trainer=lowerCAmelCase__) self.add_unk_id() def snake_case_ ( self): __SCREAMING_SNAKE_CASE = json.loads(self._tokenizer.to_str()) __SCREAMING_SNAKE_CASE = self.special_tokens["""unk"""]["""id"""] __SCREAMING_SNAKE_CASE = Tokenizer.from_str(json.dumps(lowerCAmelCase__))
255
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) __magic_name__ = { "iou_prediction_head.layers.0": "iou_prediction_head.proj_in", "iou_prediction_head.layers.1": "iou_prediction_head.layers.0", "iou_prediction_head.layers.2": "iou_prediction_head.proj_out", "mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1", "mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm", "mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2", "mask_downscaling.0": "mask_embed.conv1", "mask_downscaling.1": "mask_embed.layer_norm1", "mask_downscaling.3": "mask_embed.conv2", "mask_downscaling.4": "mask_embed.layer_norm2", "mask_downscaling.6": "mask_embed.conv3", "point_embeddings": "point_embed", "pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding", "image_encoder": "vision_encoder", "neck.0": "neck.conv1", "neck.1": "neck.layer_norm1", "neck.2": "neck.conv2", "neck.3": "neck.layer_norm2", "patch_embed.proj": "patch_embed.projection", ".norm": ".layer_norm", "blocks": "layers", } def _lowerCAmelCase ( UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = {} state_dict.pop("""pixel_mean""" , UpperCamelCase_ ) state_dict.pop("""pixel_std""" , UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*""" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __SCREAMING_SNAKE_CASE = key.replace(UpperCamelCase_ , UpperCamelCase_ ) if re.match(UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = int(re.match(UpperCamelCase_ , UpperCamelCase_ ).group(2 ) ) if layer_nb == 0: __SCREAMING_SNAKE_CASE = key.replace("""layers.0""" , """proj_in""" ) elif layer_nb == 1: __SCREAMING_SNAKE_CASE = key.replace("""layers.1""" , """layers.0""" ) elif layer_nb == 2: __SCREAMING_SNAKE_CASE = key.replace("""layers.2""" , """proj_out""" ) __SCREAMING_SNAKE_CASE = value __SCREAMING_SNAKE_CASE = model_state_dict[ """prompt_encoder.shared_embedding.positional_embedding""" ] return model_state_dict def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="ybelkada/segment-anything" ): __SCREAMING_SNAKE_CASE = hf_hub_download(UpperCamelCase_ , f"checkpoints/{model_name}.pth" ) if "sam_vit_b" in model_name: __SCREAMING_SNAKE_CASE = SamConfig() elif "sam_vit_l" in model_name: __SCREAMING_SNAKE_CASE = SamVisionConfig( hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) __SCREAMING_SNAKE_CASE = SamConfig( vision_config=UpperCamelCase_ , ) elif "sam_vit_h" in model_name: __SCREAMING_SNAKE_CASE = SamVisionConfig( hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) __SCREAMING_SNAKE_CASE = SamConfig( vision_config=UpperCamelCase_ , ) __SCREAMING_SNAKE_CASE = torch.load(UpperCamelCase_ , map_location="""cpu""" ) __SCREAMING_SNAKE_CASE = replace_keys(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = SamImageProcessor() __SCREAMING_SNAKE_CASE = SamProcessor(image_processor=UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = SamModel(UpperCamelCase_ ) hf_model.load_state_dict(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = hf_model.to("""cuda""" ) __SCREAMING_SNAKE_CASE = 
"""https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png""" __SCREAMING_SNAKE_CASE = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("""RGB""" ) __SCREAMING_SNAKE_CASE = [[[400, 650]]] __SCREAMING_SNAKE_CASE = [[1]] __SCREAMING_SNAKE_CASE = processor(images=np.array(UpperCamelCase_ ) , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_890_251_159_668 __SCREAMING_SNAKE_CASE = processor( images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_712_603_092_193_604 __SCREAMING_SNAKE_CASE = ((75, 275, 1725, 850),) __SCREAMING_SNAKE_CASE = processor(images=np.array(UpperCamelCase_ ) , input_boxes=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_686_015_605_926_514 # Test with 2 points and 1 image. __SCREAMING_SNAKE_CASE = [[[400, 650], [800, 650]]] __SCREAMING_SNAKE_CASE = [[1, 1]] __SCREAMING_SNAKE_CASE = processor( images=np.array(UpperCamelCase_ ) , input_points=UpperCamelCase_ , input_labels=UpperCamelCase_ , return_tensors="""pt""" ).to("""cuda""" ) with torch.no_grad(): __SCREAMING_SNAKE_CASE = hf_model(**UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_936_047_792_434_692 if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() __magic_name__ = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"] parser.add_argument( "--model_name", default="sam_vit_h_4b8939", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) parser.add_argument( "--model_hub_id", default="ybelkada/segment-anything", choices=choices, type=str, help="Path to hf config.json of model to convert", ) __magic_name__ = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
255
1
"""Deprecated feature extractor class for Chinese-CLIP, kept for backward compatibility."""
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
209
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"
        ),
        "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt",
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "squeezebert/squeezebert-uncased": (
            "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli": (
            "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"
        ),
        "squeezebert/squeezebert-mnli-headless": (
            "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "squeezebert/squeezebert-uncased": 512,
    "squeezebert/squeezebert-mnli": 512,
    "squeezebert/squeezebert-mnli-headless": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "squeezebert/squeezebert-uncased": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli": {"do_lower_case": True},
    "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True},
}


class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if any of its options diverge from the arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
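A usage sketch for the pair-encoding helpers above (assumes Hub access; the sentences are arbitrary):

tokenizer = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tokenizer("how are you?", "fine, thanks")
# Segment ids are 0 over [CLS] A [SEP] and 1 over B [SEP]:
print(enc["token_type_ids"])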
178
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}


class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
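A small instantiation sketch (the override values are illustrative, echoing the google/mobilenet_v1_0.75_192 sizing):

config = MobileNetV1Config(image_size=192, depth_multiplier=0.75)
print(config.image_size, config.depth_multiplier, config.hidden_act)  # 192 0.75 relu6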
355
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Count the positive integers that have exactly n digits and are also
    an nth power (e.g. 8**9 = 134217728 has exactly 9 digits).
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
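One hand-checkable member of the counted set (an illustrative check, not part of the original script):

# 8 ** 9 = 134217728, which has exactly 9 digits, so (base=8, power=9) is counted.
assert len(str(8**9)) == 9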
151
0
def solution(length: int = 50) -> int:
    """
    Count the ways a row of `length` unit squares can be tiled using unit
    squares together with blocks of length two, three, or four.
    """
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
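A small value that can be verified by hand against the recurrence above:

# A row of length 4 can be tiled 8 ways: all unit squares; one 2-block in three
# positions; two 2-blocks; one 3-block in two positions; or one 4-block.
assert solution(4) == 8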
67
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # Base Case if curr_ind == len(UpperCAmelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(UpperCAmelCase ) ): if valid_connection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): # Insert current vertex into path as next transition snake_case_ = next_ver # Validate created path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , curr_ind + 1 ): return True # Backtrack snake_case_ = -1 return False def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 0 ) -> list[int]: snake_case_ = [-1] * (len(UpperCAmelCase ) + 1) # initialize start and end of path with starting index snake_case_ = snake_case_ = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , 1 ) else []
69
0
'''simple docstring''' import unittest from transformers import MraConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraModel, ) from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[Any]=8 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Any=99 , UpperCAmelCase_ : Dict=16 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : int=36 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : int=512 , UpperCAmelCase_ : List[Any]=16 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : Optional[int]=None , ): """simple docstring""" __UpperCAmelCase : Optional[int] = parent __UpperCAmelCase : Tuple = batch_size __UpperCAmelCase : Tuple = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : List[str] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : Tuple = use_labels __UpperCAmelCase : Optional[int] = vocab_size __UpperCAmelCase : List[str] = hidden_size __UpperCAmelCase : Union[str, Any] = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : Dict = attention_probs_dropout_prob __UpperCAmelCase : int = max_position_embeddings __UpperCAmelCase : Union[str, Any] = type_vocab_size __UpperCAmelCase : Any = type_sequence_label_size __UpperCAmelCase : str = initializer_range __UpperCAmelCase : Tuple = num_labels __UpperCAmelCase : int = num_choices __UpperCAmelCase : Optional[Any] = scope def lowerCamelCase_ ( self : str ): """simple docstring""" __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Dict = None if self.use_input_mask: __UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : List[Any] = None if self.use_token_type_ids: __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : int = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[Any] = None if self.use_labels: __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Tuple = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" return MraConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , ) def lowerCamelCase_ ( self : Any ): """simple docstring""" __UpperCAmelCase : Optional[int] = self.get_config() __UpperCAmelCase : Tuple = 300 return config def lowerCamelCase_ ( self : str ): """simple docstring""" ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Optional[Any] = self.prepare_config_and_inputs() __UpperCAmelCase : Optional[int] = True __UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple ): """simple docstring""" __UpperCAmelCase : List[str] = MraModel(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __UpperCAmelCase : List[str] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) __UpperCAmelCase : Optional[int] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) __UpperCAmelCase : str = model(UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , ): """simple docstring""" __UpperCAmelCase : Any = True __UpperCAmelCase : int = MraModel(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __UpperCAmelCase : str = model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , ) __UpperCAmelCase : int = model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , ) __UpperCAmelCase : Tuple = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase_ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict ): """simple docstring""" __UpperCAmelCase : Optional[Any] = MraForMaskedLM(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __UpperCAmelCase : Tuple = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , 
labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] ): """simple docstring""" __UpperCAmelCase : int = MraForQuestionAnswering(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __UpperCAmelCase : int = model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple ): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.num_labels __UpperCAmelCase : List[Any] = MraForSequenceClassification(UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __UpperCAmelCase : List[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ): """simple docstring""" __UpperCAmelCase : Optional[int] = self.num_labels __UpperCAmelCase : int = MraForTokenClassification(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple ): """simple docstring""" __UpperCAmelCase : Tuple = self.num_choices __UpperCAmelCase : Optional[Any] = MraForMultipleChoice(config=UpperCAmelCase_ ) model.to(UpperCAmelCase_ ) model.eval() __UpperCAmelCase : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : List[Any] = model( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" __UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : List[Any] = config_and_inputs __UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, 
"token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE = ( ( MraModel, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = () def lowerCamelCase_ ( self : List[str] ): """simple docstring""" __UpperCAmelCase : Any = MraModelTester(self ) __UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def lowerCamelCase_ ( self : Tuple ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __UpperCAmelCase : List[Any] = type self.model_tester.create_and_check_model(*UpperCAmelCase_ ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase_ ) def lowerCamelCase_ ( self : str ): """simple docstring""" __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase_ ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_ ) @slow def lowerCamelCase_ ( self : Tuple ): """simple docstring""" for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[str] = MraModel.from_pretrained(UpperCAmelCase_ ) self.assertIsNotNone(UpperCAmelCase_ ) @unittest.skip(reason="MRA does not output attentions" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" return @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase_ ( self : List[str] ): """simple docstring""" __UpperCAmelCase : Dict = MraModel.from_pretrained("uw-madison/mra-base-512-4" ) __UpperCAmelCase : Dict = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Union[str, Any] = model(UpperCAmelCase_ )[0] __UpperCAmelCase : Tuple = torch.Size((1, 256, 768) ) self.assertEqual(output.shape , UpperCAmelCase_ ) __UpperCAmelCase : Optional[Any] = torch.tensor( [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self : int ): """simple docstring""" 
__UpperCAmelCase : Optional[int] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" ) __UpperCAmelCase : Dict = torch.arange(256 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : Tuple = model(UpperCAmelCase_ )[0] __UpperCAmelCase : Any = 50_265 __UpperCAmelCase : List[Any] = torch.Size((1, 256, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase_ ) __UpperCAmelCase : int = torch.tensor( [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) ) @slow def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" __UpperCAmelCase : List[Any] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" ) __UpperCAmelCase : Dict = torch.arange(4_096 ).unsqueeze(0 ) with torch.no_grad(): __UpperCAmelCase : int = model(UpperCAmelCase_ )[0] __UpperCAmelCase : List[str] = 50_265 __UpperCAmelCase : Dict = torch.Size((1, 4_096, vocab_size) ) self.assertEqual(output.shape , UpperCAmelCase_ ) __UpperCAmelCase : Union[str, Any] = torch.tensor( [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
37
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
import platform

import numpy as np
import psutil
import torch

from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file

from ..utils import is_npu_available, is_xpu_available


def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
37
1
import random
from typing import Any


def fisher_yates_shuffle(data: list) -> list[Any]:
    """Shuffle `data` in place by repeatedly swapping two random positions."""
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[b], data[a] = data[a], data[b]
    return data


if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
39
import json
import logging
import os
import sys
from pathlib import Path

import finetune_rag

from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    require_ray,
    require_torch_gpu,
    require_torch_multi_gpu,
)


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}

        # Write dummy source/target files for every split.
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
            --data_dir {data_dir} \
            --output_dir {output_dir} \
            --model_name_or_path facebook/rag-sequence-base \
            --model_type rag_sequence \
            --do_train \
            --do_predict \
            --n_val -1 \
            --val_check_interval 1.0 \
            --train_batch_size 2 \
            --eval_batch_size 1 \
            --max_source_length 25 \
            --max_target_length 25 \
            --val_max_target_length 25 \
            --test_max_target_length 25 \
            --label_smoothing 0.1 \
            --dropout 0.1 \
            --attention_dropout 0.1 \
            --weight_decay 0.001 \
            --adam_epsilon 1e-08 \
            --max_grad_norm 0.1 \
            --lr_scheduler polynomial \
            --learning_rate 3e-04 \
            --num_train_epochs 1 \
            --warmup_steps 4 \
            --gradient_accumulation_steps 1 \
            --distributed-port 8787 \
            --use_dummy_dataset 1 \
            --distributed_retriever {distributed_retriever} \
        """.split()

        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result

    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
39
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
353
import argparse
import os
import re

import packaging.version


PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Update the version of one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all example files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version() -> packaging.version.Version:
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
196
0
def heaps(arr: list) -> list:
    """Return all permutations of `arr`, generated with Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
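A quick check that all 3! = 6 permutations come back (the order of results is implementation-defined, so compare as a set):

perms = heaps([1, 2, 3])
assert len(perms) == 6
assert set(perms) == {(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)}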
4
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
24
0
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal value to its binary equivalent, returned as an
    int whose decimal digits are the binary digits (sign is preserved).
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
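Two hand-checkable examples (0xAC = 172 = 0b10101100; the function also carries the sign through):

assert hex_to_bin("AC") == 10101100
assert hex_to_bin("-ac") == -10101100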
240
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import LayoutLMConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.layoutlm.modeling_tf_layoutlm import ( TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFLayoutLMForMaskedLM, TFLayoutLMForQuestionAnswering, TFLayoutLMForSequenceClassification, TFLayoutLMForTokenClassification, TFLayoutLMModel, ) class a : """simple docstring""" def __init__( self : Union[str, Any] , snake_case : str , snake_case : Dict=13 , snake_case : Optional[Any]=7 , snake_case : Tuple=True , snake_case : Optional[int]=True , snake_case : str=True , snake_case : int=True , snake_case : List[str]=99 , snake_case : Any=32 , snake_case : List[str]=2 , snake_case : Tuple=4 , snake_case : Union[str, Any]=37 , snake_case : Dict="gelu" , snake_case : str=0.1 , snake_case : List[Any]=0.1 , snake_case : Any=512 , snake_case : Optional[Any]=16 , snake_case : Optional[int]=2 , snake_case : Union[str, Any]=0.02 , snake_case : List[Any]=3 , snake_case : str=4 , snake_case : int=None , snake_case : Union[str, Any]=1000 , ) -> Tuple: __UpperCAmelCase : int = parent __UpperCAmelCase : Optional[int] = batch_size __UpperCAmelCase : Dict = seq_length __UpperCAmelCase : List[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : List[Any] = use_token_type_ids __UpperCAmelCase : str = use_labels __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : List[str] = hidden_size __UpperCAmelCase : Dict = num_hidden_layers __UpperCAmelCase : Any = num_attention_heads __UpperCAmelCase : List[str] = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : int = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : List[str] = max_position_embeddings __UpperCAmelCase : str = type_vocab_size __UpperCAmelCase : Dict = type_sequence_label_size __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : Optional[int] = num_labels __UpperCAmelCase : Optional[Any] = num_choices __UpperCAmelCase : List[Any] = scope __UpperCAmelCase : str = range_bbox def lowerCamelCase__ ( self : Tuple ) -> Union[str, Any]: __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # convert bbox to numpy since TF does not support item assignment __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ).numpy() # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: __UpperCAmelCase : Optional[int] = bbox[i, j, 3] __UpperCAmelCase : Any = bbox[i, j, 1] __UpperCAmelCase : List[Any] = t if bbox[i, j, 2] < bbox[i, j, 0]: __UpperCAmelCase : str = bbox[i, j, 2] __UpperCAmelCase : List[Any] = bbox[i, j, 0] __UpperCAmelCase : Dict = t __UpperCAmelCase : Any = tf.convert_to_tensor(snake_case ) __UpperCAmelCase : List[Any] = None if self.use_input_mask: __UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Optional[Any] = None 
__UpperCAmelCase : Optional[int] = None __UpperCAmelCase : str = None if self.use_labels: __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : Optional[int] = LayoutLMConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : List[str] , snake_case : int , snake_case : str , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Any , snake_case : List[Any] , snake_case : Any ) -> Optional[Any]: __UpperCAmelCase : Tuple = TFLayoutLMModel(config=snake_case ) __UpperCAmelCase : Optional[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) __UpperCAmelCase : Tuple = model(snake_case , snake_case , token_type_ids=snake_case ) __UpperCAmelCase : List[Any] = model(snake_case , snake_case ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self : Optional[Any] , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Dict , snake_case : str , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : str ) -> int: __UpperCAmelCase : Any = TFLayoutLMForMaskedLM(config=snake_case ) __UpperCAmelCase : List[Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Tuple , snake_case : Any , snake_case : Dict , snake_case : str , snake_case : Tuple , snake_case : str , snake_case : Optional[Any] , snake_case : str , snake_case : str ) -> Any: __UpperCAmelCase : List[str] = self.num_labels __UpperCAmelCase : Optional[int] = TFLayoutLMForSequenceClassification(config=snake_case ) __UpperCAmelCase : Any = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : Dict , snake_case : List[str] , snake_case : Dict , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Union[str, Any] , snake_case : Any , snake_case : Tuple , snake_case : List[str] ) -> List[str]: __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : Optional[int] = TFLayoutLMForTokenClassification(config=snake_case ) __UpperCAmelCase : Any = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : int , snake_case : Dict , snake_case : int , 
snake_case : Union[str, Any] , snake_case : List[str] , snake_case : Tuple , snake_case : Union[str, Any] , snake_case : Dict , snake_case : Optional[int] ) -> Dict: __UpperCAmelCase : int = TFLayoutLMForQuestionAnswering(config=snake_case ) __UpperCAmelCase : Union[str, Any] = model(snake_case , snake_case , attention_mask=snake_case , token_type_ids=snake_case ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Dict ) -> List[str]: __UpperCAmelCase : str = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) : Tuple = config_and_inputs __UpperCAmelCase : Any = { '''input_ids''': input_ids, '''bbox''': bbox, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask, } return config, inputs_dict @require_tf class a ( _a , _a , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE : Dict = ( ( TFLayoutLMModel, TFLayoutLMForMaskedLM, TFLayoutLMForTokenClassification, TFLayoutLMForSequenceClassification, TFLayoutLMForQuestionAnswering, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE : Optional[int] = ( { "feature-extraction": TFLayoutLMModel, "fill-mask": TFLayoutLMForMaskedLM, "text-classification": TFLayoutLMForSequenceClassification, "token-classification": TFLayoutLMForTokenClassification, "zero-shot": TFLayoutLMForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE : Dict = False SCREAMING_SNAKE_CASE : Any = True SCREAMING_SNAKE_CASE : List[str] = 1_0 def lowerCamelCase__ ( self : Optional[Any] ) -> Any: __UpperCAmelCase : Optional[int] = TFLayoutLMModelTester(self ) __UpperCAmelCase : Dict = ConfigTester(self , config_class=snake_case , hidden_size=37 ) def lowerCamelCase__ ( self : Any ) -> Dict: self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Dict ) -> List[str]: __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case ) def lowerCamelCase__ ( self : Union[str, Any] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case ) def lowerCamelCase__ ( self : List[Any] ) -> Any: __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case ) def lowerCamelCase__ ( self : int ) -> List[Any]: __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*snake_case ) def lowerCamelCase__ ( self : Dict ) -> List[Any]: __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case ) @slow def lowerCamelCase__ ( self : List[str] ) -> Union[str, Any]: for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : int = TFLayoutLMModel.from_pretrained(snake_case ) self.assertIsNotNone(snake_case ) @unittest.skip('''Onnx compliancy broke with TF 2.10''' ) def lowerCamelCase__ ( self : Dict ) -> Dict: pass def _a ( ): '''simple docstring''' __UpperCAmelCase : str = 
tf.convert_to_tensor([[101,1019,1014,1016,1037,12849,4747,1004,14246,2278,5439,4524,5002,2930,2193,2930,4341,3208,1005,1055,2171,2848,11300,3531,102],[101,4070,4034,7020,1024,3058,1015,1013,2861,1013,6070,19274,2772,6205,27814,16147,16147,4343,2047,10283,10969,14389,1012,2338,102]] ) # noqa: E231 __UpperCAmelCase : Optional[Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231 __UpperCAmelCase : Optional[Any] = tf.convert_to_tensor([[[0,0,0,0],[423,237,440,251],[427,272,441,287],[419,115,437,129],[961,885,992,912],[256,38,330,58],[256,38,330,58],[336,42,353,57],[360,39,401,56],[360,39,401,56],[411,39,471,59],[479,41,528,59],[533,39,630,60],[67,113,134,131],[141,115,209,132],[68,149,133,166],[141,149,187,164],[195,148,287,165],[195,148,287,165],[195,148,287,165],[295,148,349,165],[441,149,492,166],[497,149,546,164],[64,201,125,218],[1000,1000,1000,1000]],[[0,0,0,0],[662,150,754,166],[665,199,742,211],[519,213,554,228],[519,213,554,228],[134,433,187,454],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[130,467,204,480],[314,469,376,482],[504,684,582,706],[941,825,973,900],[941,825,973,900],[941,825,973,900],[941,825,973,900],[610,749,652,765],[130,659,168,672],[176,657,237,672],[238,657,312,672],[443,653,628,672],[443,653,628,672],[716,301,825,317],[1000,1000,1000,1000]]] ) # noqa: E231 __UpperCAmelCase : str = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231 # these are sequence labels (i.e. at the token level) __UpperCAmelCase : Optional[int] = tf.convert_to_tensor([[-100,10,10,10,9,1,-100,7,7,-100,7,7,4,2,5,2,8,8,-100,-100,5,0,3,2,-100],[-100,12,12,12,-100,12,10,-100,-100,-100,-100,10,12,9,-100,-100,-100,10,10,10,9,12,-100,10,-100]] ) # noqa: E231 # fmt: on return input_ids, attention_mask, bbox, token_type_ids, labels @require_tf class a ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : List[str] ) -> Optional[int]: __UpperCAmelCase : int = TFLayoutLMModel.from_pretrained('''microsoft/layoutlm-base-uncased''' ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : str = prepare_layoutlm_batch_inputs() # forward pass __UpperCAmelCase : Dict = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the sequence output on [0, :3, :3] __UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor( [[0.1_785, -0.1_947, -0.0_425], [-0.3_254, -0.2_807, 0.2_553], [-0.5_391, -0.3_322, 0.3_364]] , ) self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1E-3 ) ) # test the pooled output on [1, :3] __UpperCAmelCase : str = tf.convert_to_tensor([-0.6_580, -0.0_214, 0.8_552] ) self.assertTrue(np.allclose(outputs.pooler_output[1, :3] , snake_case , atol=1E-3 ) ) @slow def lowerCamelCase__ ( self : Optional[int] ) -> int: # initialize model with randomly initialized sequence classification head __UpperCAmelCase : str = TFLayoutLMForSequenceClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=2 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = prepare_layoutlm_batch_inputs() # forward pass __UpperCAmelCase : Tuple = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , 
labels=tf.convert_to_tensor([1, 1] ) , ) # test whether we get a loss as a scalar __UpperCAmelCase : str = outputs.loss __UpperCAmelCase : Optional[Any] = (2,) self.assertEqual(loss.shape , snake_case ) # test the shape of the logits __UpperCAmelCase : List[str] = outputs.logits __UpperCAmelCase : List[Any] = (2, 2) self.assertEqual(logits.shape , snake_case ) @slow def lowerCamelCase__ ( self : List[Any] ) -> str: # initialize model with randomly initialized token classification head __UpperCAmelCase : Union[str, Any] = TFLayoutLMForTokenClassification.from_pretrained('''microsoft/layoutlm-base-uncased''' , num_labels=13 ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = prepare_layoutlm_batch_inputs() # forward pass __UpperCAmelCase : Tuple = model( input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case ) # test the shape of the logits __UpperCAmelCase : List[Any] = outputs.logits __UpperCAmelCase : Optional[int] = tf.convert_to_tensor((2, 25, 13) ) self.assertEqual(logits.shape , snake_case ) @slow def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]: # initialize model with randomly initialized token classification head __UpperCAmelCase : Dict = TFLayoutLMForQuestionAnswering.from_pretrained('''microsoft/layoutlm-base-uncased''' ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Tuple = prepare_layoutlm_batch_inputs() # forward pass __UpperCAmelCase : Optional[Any] = model(input_ids=snake_case , bbox=snake_case , attention_mask=snake_case , token_type_ids=snake_case ) # test the shape of the logits __UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor((2, 25) ) self.assertEqual(outputs.start_logits.shape , snake_case ) self.assertEqual(outputs.end_logits.shape , snake_case )
240
1
'''simple docstring''' import unittest import numpy as np from transformers import RoFormerConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roformer.modeling_flax_roformer import ( FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, ) class __A ( unittest.TestCase ): def __init__(self : List[str] , __a : Union[str, Any] , __a : List[str]=13 , __a : List[Any]=7 , __a : Union[str, Any]=True , __a : Any=True , __a : List[Any]=True , __a : str=True , __a : List[Any]=99 , __a : Any=32 , __a : Tuple=5 , __a : Union[str, Any]=4 , __a : List[str]=37 , __a : Tuple="gelu" , __a : int=0.1 , __a : str=0.1 , __a : List[Any]=512 , __a : Optional[int]=16 , __a : Tuple=2 , __a : Any=0.02 , __a : Union[str, Any]=4 , ): UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_attention_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_choices def _lowercase (self : int ): UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_attention_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__a , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def _lowercase (self : List[Any] ): UpperCAmelCase_ = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict @require_flax class __A ( UpperCamelCase__ , unittest.TestCase ): a__ : Dict = True a__ : Dict = ( ( FlaxRoFormerModel, FlaxRoFormerForMaskedLM, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerForMultipleChoice, FlaxRoFormerForQuestionAnswering, ) if is_flax_available() else () ) def _lowercase (self : Dict ): UpperCAmelCase_ = FlaxRoFormerModelTester(self ) @slow def _lowercase (self : Optional[Any] ): for model_class_name in self.all_model_classes: UpperCAmelCase_ = 
model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=__a ) UpperCAmelCase_ = model(np.ones((1, 1) ) ) self.assertIsNotNone(__a ) @require_flax class __A ( unittest.TestCase ): @slow def _lowercase (self : List[Any] ): UpperCAmelCase_ = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) UpperCAmelCase_ = jnp.array([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ = model(__a )[0] UpperCAmelCase_ = 50000 UpperCAmelCase_ = (1, 6, vocab_size) self.assertEqual(output.shape , __a ) UpperCAmelCase_ = jnp.array( [[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] ) self.assertTrue(jnp.allclose(output[:, :3, :3] , __a , atol=1E-4 ) )
1
def solution() -> int:
    """
    Compute the product d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 * d_1000000
    of the digits of Champernowne's constant 0.123456789101112... (Project Euler 40).
    """
    constant = []
    i = 1
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
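# A hedged alternative sketch, not part of the solution above: the same digits
# can be found without materialising a multi-megabyte string, by locating the
# n-th digit of Champernowne's constant arithmetically. The helper name
# `champernowne_digit` is introduced here for illustration only.
def champernowne_digit(n: int) -> int:
    """Return the n-th digit (1-indexed) of 0.123456789101112..."""
    digit_len = 1  # number of digits of the numbers in the current block
    count = 9  # how many numbers are in the current block
    start = 1  # first number of the current block
    while n > digit_len * count:
        n -= digit_len * count  # skip the whole block of digits
        digit_len += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // digit_len  # the number containing digit n
    return int(str(number)[(n - 1) % digit_len])


# sanity check against the first digits 1 2 3 4 5 6 7 8 9 1 0 ...
assert [champernowne_digit(k) for k in range(1, 12)] == [1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 0]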
187
0
"""simple docstring""" import os from typing import Any, Callable, Dict, List, Optional, Tuple, Union import torch from torch import nn from ...models.controlnet import ControlNetModel, ControlNetOutput from ...models.modeling_utils import ModelMixin from ...utils import logging __UpperCAmelCase = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE ( A__ ): def __init__( self , __A ) -> Tuple: super().__init__() lowerCAmelCase_ :int = nn.ModuleList(__A ) def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , __A = None , __A = None , __A = None , __A = None , __A = False , __A = True , ) -> Union[ControlNetOutput, Tuple]: for i, (image, scale, controlnet) in enumerate(zip(__A , __A , self.nets ) ): lowerCAmelCase_ :Optional[int] = controlnet( __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , __A , ) # merge samples if i == 0: lowerCAmelCase_ :Optional[int] = down_samples, mid_sample else: lowerCAmelCase_ :str = [ samples_prev + samples_curr for samples_prev, samples_curr in zip(__A , __A ) ] mid_block_res_sample += mid_sample return down_block_res_samples, mid_block_res_sample def __lowerCAmelCase ( self , __A , __A = True , __A = None , __A = False , __A = None , ) -> Dict: lowerCAmelCase_ :List[str] = 0 lowerCAmelCase_ :Any = save_directory for controlnet in self.nets: controlnet.save_pretrained( __A , is_main_process=__A , save_function=__A , safe_serialization=__A , variant=__A , ) idx += 1 lowerCAmelCase_ :Optional[Any] = model_path_to_save + f"""_{idx}""" @classmethod def __lowerCAmelCase ( cls , __A , **__A ) -> int: lowerCAmelCase_ :Optional[int] = 0 lowerCAmelCase_ :Union[str, Any] = [] # load controlnet and append to list until no controlnet directory exists anymore # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained` # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ... lowerCAmelCase_ :Union[str, Any] = pretrained_model_path while os.path.isdir(__A ): lowerCAmelCase_ :Optional[int] = ControlNetModel.from_pretrained(__A , **__A ) controlnets.append(__A ) idx += 1 lowerCAmelCase_ :int = pretrained_model_path + f"""_{idx}""" logger.info(f"""{len(__A )} controlnets loaded from {pretrained_model_path}.""" ) if len(__A ) == 0: raise ValueError( f"""No ControlNets found under {os.path.dirname(__A )}. Expected at least {pretrained_model_path + "_0"}.""" ) return cls(__A )
355
"""simple docstring""" import importlib import json import os import sys import tempfile import unittest from pathlib import Path import transformers import transformers.models.auto from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig from transformers.models.bert.configuration_bert import BertConfig from transformers.models.roberta.configuration_roberta import RobertaConfig from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __UpperCAmelCase = get_tests_dir('fixtures/dummy-config.json') class _SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = 0 def __lowerCAmelCase ( self ) -> List[str]: self.assertIsNotNone(transformers.models.auto.__spec__ ) self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) ) def __lowerCAmelCase ( self ) -> Tuple: lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""bert-base-uncased""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Union[str, Any]: lowerCAmelCase_ :int = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Any: lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Dict: lowerCAmelCase_ :int = AutoConfig.for_model("""roberta""" ) self.assertIsInstance(__A , __A ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmp_dir: # This model name contains bert and roberta, but roberta ends up being picked. lowerCAmelCase_ :int = os.path.join(__A , """fake-roberta""" ) os.makedirs(__A , exist_ok=__A ) with open(os.path.join(__A , """config.json""" ) , """w""" ) as f: f.write(json.dumps({} ) ) lowerCAmelCase_ :Any = AutoConfig.from_pretrained(__A ) self.assertEqual(type(__A ) , __A ) def __lowerCAmelCase ( self ) -> Optional[int]: try: AutoConfig.register("""custom""" , __A ) # Wrong model type will raise an error with self.assertRaises(__A ): AutoConfig.register("""model""" , __A ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(__A ): AutoConfig.register("""bert""" , __A ) # Now that the config is registered, it can be used as any other config with the auto-API lowerCAmelCase_ :Union[str, Any] = CustomConfig() with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Optional[int] = AutoConfig.from_pretrained(__A ) self.assertIsInstance(__A , __A ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] def __lowerCAmelCase ( self ) -> Tuple: with self.assertRaisesRegex( __A , """bert-base is not a local folder and is not a valid model identifier""" ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""bert-base""" ) def __lowerCAmelCase ( self ) -> Any: with self.assertRaisesRegex( __A , r"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , revision="""aaaaaa""" ) def __lowerCAmelCase ( self ) -> int: with self.assertRaisesRegex( __A , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ): lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" ) def __lowerCAmelCase ( self ) -> 
Tuple: # If remote code is not set, we will time out when asking whether to load the model. with self.assertRaises(__A ): lowerCAmelCase_ :Tuple = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) # If remote code is disabled, we can't load this config. with self.assertRaises(__A ): lowerCAmelCase_ :List[str] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) lowerCAmelCase_ :str = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) # Test config can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(__A ) lowerCAmelCase_ :Dict = AutoConfig.from_pretrained(__A , trust_remote_code=__A ) self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" ) def __lowerCAmelCase ( self ) -> int: class _SCREAMING_SNAKE_CASE ( A__ ): UpperCAmelCase_ :int = "new-model" try: AutoConfig.register("""new-model""" , __A ) # If remote code is not set, the default is to use local lowerCAmelCase_ :Any = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote code is disabled, we load the local one. lowerCAmelCase_ :Union[str, Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" ) # If remote is enabled, we load from the Hub lowerCAmelCase_ :Optional[Any] = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=__A ) self.assertEqual(config.__class__.__name__ , """NewModelConfig""" ) finally: if "new-model" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["new-model"]
1
0
import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): def __magic_name__ ( self : List[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Dict = 1 UpperCAmelCase : Tuple = 3 UpperCAmelCase : str = (3_2, 3_2) UpperCAmelCase : Tuple = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(_snake_case ) return image @property def __magic_name__ ( self : int ): torch.manual_seed(0 ) UpperCAmelCase : str = UNetaDConditionModel( block_out_channels=(3_2, 6_4), layers_per_block=2, sample_size=3_2, in_channels=4, out_channels=4, down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D'''), up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D'''), cross_attention_dim=3_2, ) return model @property def __magic_name__ ( self : Dict ): torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[3_2, 6_4], in_channels=3, out_channels=3, down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''], up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''], latent_channels=4, ) return model @property def __magic_name__ ( self : int ): torch.manual_seed(0 ) UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=3_2, intermediate_size=3_7, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_0_0_0, ) return CLIPTextModel(_snake_case ) @property def __magic_name__ ( self : List[Any] ): def extract(*__A : int, **__A : int ): class __UpperCAmelCase : def __init__( self : int ): UpperCAmelCase : Union[str, Any] = torch.ones([0] ) def __magic_name__ ( self : Optional[Any], __A : List[Any] ): self.pixel_values.to(_snake_case ) return self return Out() return extract def __magic_name__ ( self : Dict ): UpperCAmelCase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : List[Any] = self.dummy_cond_unet UpperCAmelCase : Optional[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, beta_schedule='''scaled_linear''', clip_sample=_snake_case, set_alpha_to_one=_snake_case, ) UpperCAmelCase : str = self.dummy_vae UpperCAmelCase : int = self.dummy_text_encoder UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk UpperCAmelCase : str = StableDiffusionPipeline( unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, ) UpperCAmelCase : Dict = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger''' UpperCAmelCase : List[Any] = torch.Generator(device=_snake_case ).manual_seed(0 ) UpperCAmelCase : int = sd_pipe([prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' ) UpperCAmelCase : Any = output.images UpperCAmelCase : 
Union[str, Any] = torch.Generator(device=_snake_case ).manual_seed(0 ) UpperCAmelCase : int = sd_pipe( [prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=_snake_case, )[0] UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) UpperCAmelCase : Tuple = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator UpperCAmelCase : Union[str, Any] = self.dummy_cond_unet UpperCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_snake_case ) UpperCAmelCase : str = self.dummy_vae UpperCAmelCase : int = self.dummy_text_encoder UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk UpperCAmelCase : Optional[int] = StableDiffusionPipeline( unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, ) UpperCAmelCase : Any = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase : int = '''A painting of a squirrel eating a burger''' UpperCAmelCase : str = torch.Generator(device=_snake_case ).manual_seed(0 ) UpperCAmelCase : Optional[Any] = sd_pipe([prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''' ) UpperCAmelCase : int = output.images UpperCAmelCase : Tuple = torch.Generator(device=_snake_case ).manual_seed(0 ) UpperCAmelCase : int = sd_pipe( [prompt], generator=_snake_case, guidance_scale=6.0, num_inference_steps=2, output_type='''np''', return_dict=_snake_case, )[0] UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] UpperCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) UpperCAmelCase : int = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def __magic_name__ ( self : Any ): UpperCAmelCase : List[Any] = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''', safety_checker=_snake_case ) assert isinstance(_snake_case, _snake_case ) assert isinstance(pipe.scheduler, _snake_case ) assert pipe.safety_checker is None UpperCAmelCase : Union[str, Any] = pipe('''example prompt''', num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(_snake_case ) UpperCAmelCase : Tuple = StableDiffusionPipeline.from_pretrained(_snake_case ) # sanity check that the pipeline still works assert pipe.safety_checker is None UpperCAmelCase : str = pipe('''example prompt''', num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''', '''This test requires a GPU''' ) def __magic_name__ ( self : str ): UpperCAmelCase : Any = self.dummy_cond_unet 
UpperCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=_snake_case ) UpperCAmelCase : int = self.dummy_vae UpperCAmelCase : List[str] = self.dummy_text_encoder UpperCAmelCase : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 UpperCAmelCase : Dict = unet.half() UpperCAmelCase : Any = vae.half() UpperCAmelCase : Tuple = bert.half() # make sure here that pndm scheduler skips prk UpperCAmelCase : Union[str, Any] = StableDiffusionPipeline( unet=_snake_case, scheduler=_snake_case, vae=_snake_case, text_encoder=_snake_case, tokenizer=_snake_case, safety_checker=_snake_case, feature_extractor=self.dummy_extractor, ) UpperCAmelCase : str = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase : Any = '''A painting of a squirrel eating a burger''' UpperCAmelCase : Union[str, Any] = sd_pipe([prompt], num_inference_steps=2, output_type='''np''' ).images assert image.shape == (1, 6_4, 6_4, 3) @nightly @require_torch_gpu class __UpperCAmelCase ( unittest.TestCase ): def __magic_name__ ( self : Any ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Optional[Any] ): UpperCAmelCase : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', safety_checker=_snake_case ) UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase : str = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase : Optional[int] = ( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) UpperCAmelCase : List[Any] = 4_0_0_3_6_6_0_3_4_6 UpperCAmelCase : List[Any] = 7 # without safety guidance (sld_guidance_scale = 0) UpperCAmelCase : Optional[Any] = torch.manual_seed(_snake_case ) UpperCAmelCase : str = sd_pipe( [prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=0, ) UpperCAmelCase : Dict = output.images UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] UpperCAmelCase : int = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) UpperCAmelCase : Optional[int] = torch.manual_seed(_snake_case ) UpperCAmelCase : Optional[Any] = sd_pipe( [prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.0_2_5, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) UpperCAmelCase : List[Any] = output.images UpperCAmelCase : Dict = image[0, -3:, -3:, -1] UpperCAmelCase : Optional[Any] = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __magic_name__ ( self : Any ): UpperCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''', 
safety_checker=_snake_case ) UpperCAmelCase : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) UpperCAmelCase : Optional[Any] = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase : List[Any] = '''padme amidala taking a bath artwork, safe for work, no nudity''' UpperCAmelCase : Tuple = 2_7_3_4_9_7_1_7_5_5 UpperCAmelCase : List[str] = 7 UpperCAmelCase : Dict = torch.manual_seed(_snake_case ) UpperCAmelCase : Union[str, Any] = sd_pipe( [prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=0, ) UpperCAmelCase : str = output.images UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] UpperCAmelCase : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 UpperCAmelCase : Tuple = torch.manual_seed(_snake_case ) UpperCAmelCase : Union[str, Any] = sd_pipe( [prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.0_2_5, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) UpperCAmelCase : int = output.images UpperCAmelCase : Dict = image[0, -3:, -3:, -1] UpperCAmelCase : Optional[int] = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __magic_name__ ( self : Tuple ): UpperCAmelCase : str = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) UpperCAmelCase : int = sd_pipe.to(_snake_case ) sd_pipe.set_progress_bar_config(disable=_snake_case ) UpperCAmelCase : Optional[int] = ( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.''' ''' leyendecker''' ) UpperCAmelCase : List[Any] = 1_0_4_4_3_5_5_2_3_4 UpperCAmelCase : Optional[int] = 1_2 UpperCAmelCase : List[Any] = torch.manual_seed(_snake_case ) UpperCAmelCase : Dict = sd_pipe( [prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=0, ) UpperCAmelCase : str = output.images UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase : List[str] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 UpperCAmelCase : Optional[int] = torch.manual_seed(_snake_case ) UpperCAmelCase : Any = sd_pipe( [prompt], generator=_snake_case, guidance_scale=_snake_case, num_inference_steps=5_0, output_type='''np''', width=5_1_2, height=5_1_2, sld_guidance_scale=2_0_0_0, sld_warmup_steps=7, sld_threshold=0.0_2_5, sld_momentum_scale=0.5, sld_mom_beta=0.7, ) UpperCAmelCase : Dict = output.images UpperCAmelCase : Dict = image[0, -3:, -3:, -1] UpperCAmelCase : Dict = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 5_1_2, 5_1_2, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
336
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node lowerCAmelCase_ = 4 lowerCAmelCase_ = 3 class __A ( A_ ): '''simple docstring''' pass def __UpperCAmelCase ( __lowerCamelCase ) -> Dict: for shard in shards: for i in range(__lowerCamelCase ): yield {"i": i, "shard": shard} def __UpperCAmelCase ( ) -> Tuple: lowercase__ : int = int(os.environ['''RANK'''] ) lowercase__ : str = int(os.environ['''WORLD_SIZE'''] ) lowercase__ : List[Any] = ArgumentParser() parser.add_argument('''--streaming''' , type=__lowerCamelCase ) parser.add_argument('''--local_rank''' , type=__lowerCamelCase ) parser.add_argument('''--num_workers''' , type=__lowerCamelCase , default=0 ) lowercase__ : int = parser.parse_args() lowercase__ : Optional[Any] = args.streaming lowercase__ : List[Any] = args.num_workers lowercase__ : Optional[Any] = {'''shards''': [f"""shard_{shard_idx}""" for shard_idx in range(__lowerCamelCase )]} lowercase__ : Dict = IterableDataset.from_generator(__lowerCamelCase , gen_kwargs=__lowerCamelCase ) if not streaming: lowercase__ : int = Dataset.from_list(list(__lowerCamelCase ) ) lowercase__ : int = split_dataset_by_node(__lowerCamelCase , rank=__lowerCamelCase , world_size=__lowerCamelCase ) lowercase__ : Optional[Any] = torch.utils.data.DataLoader(__lowerCamelCase , num_workers=__lowerCamelCase ) lowercase__ : Optional[Any] = NUM_SHARDS * NUM_ITEMS_PER_SHARD lowercase__ : str = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) lowercase__ : str = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f"""local_size {local_size} != expected_local_size {expected_local_size}""" ) if __name__ == "__main__": main()
16
0
'''simple docstring''' from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging __lowerCamelCase = logging.get_logger(__name__) class A__ ( _snake_case ): lowercase = ["pixel_values"] def __init__( self , UpperCamelCase__ = True , UpperCamelCase__ = 1 / 255 , UpperCamelCase__ = True , UpperCamelCase__ = 8 , **UpperCamelCase__ , ) -> None: '''simple docstring''' super().__init__(**UpperCamelCase__ ) A_ = do_rescale A_ = rescale_factor A_ = do_pad A_ = pad_size def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , **UpperCamelCase__ ) -> np.ndarray: '''simple docstring''' return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None ) -> Union[str, Any]: '''simple docstring''' A_ , A_ = get_image_size(UpperCamelCase__ ) A_ = (old_height // size + 1) * size - old_height A_ = (old_width // size + 1) * size - old_width return pad(UpperCamelCase__ , ((0, pad_height), (0, pad_width)) , mode="""symmetric""" , data_format=UpperCamelCase__ ) def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = ChannelDimension.FIRST , **UpperCamelCase__ , ) -> Any: '''simple docstring''' A_ = do_rescale if do_rescale is not None else self.do_rescale A_ = rescale_factor if rescale_factor is not None else self.rescale_factor A_ = do_pad if do_pad is not None else self.do_pad A_ = pad_size if pad_size is not None else self.pad_size A_ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. A_ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_rescale: A_ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_pad: A_ = [self.pad(UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] A_ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] A_ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
352
import math
import sys


def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
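# Quick usage sketch for the dynamic-programming routine above (the function
# name follows the reformatted code; expected values checked by hand). By
# Lagrange's four-square theorem every natural number is a sum of at most
# four squares, so for positive inputs the result never exceeds 4.
for n, expected in [(1, 1), (12, 3), (13, 2)]:
    # 12 = 4 + 4 + 4 and 13 = 4 + 9, matching the DP result
    assert minimum_squares_to_represent_a_number(n) == expected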
101
0
"""simple docstring""" def __SCREAMING_SNAKE_CASE ( A_ ): lowerCAmelCase__ : int = [0 for i in range(len(A_ ) )] # initialize interval's left pointer and right pointer lowerCAmelCase__ ,lowerCAmelCase__ : Tuple = 0, 0 for i in range(1 , len(A_ ) ): # case when current index is inside the interval if i <= right_pointer: lowerCAmelCase__ : Optional[int] = min(right_pointer - i + 1 , z_result[i - left_pointer] ) lowerCAmelCase__ : Tuple = min_edge while go_next(A_ , A_ , A_ ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = i, i + z_result[i] - 1 return z_result def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ ): return i + z_result[i] < len(A_ ) and s[z_result[i]] == s[i + z_result[i]] def __SCREAMING_SNAKE_CASE ( A_ , A_ ): lowerCAmelCase__ : Dict = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string lowerCAmelCase__ : List[str] = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(A_ ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
106
"""simple docstring""" import importlib import math import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Tuple, Union import flax import jax.numpy as jnp from ..utils import BaseOutput __UpperCamelCase : Optional[Any] = '''scheduler_config.json''' class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = 1 lowercase__ = 2 lowercase__ = 3 lowercase__ = 4 lowercase__ = 5 @dataclass class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = 42 class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = SCHEDULER_CONFIG_NAME lowercase__ = ["dtype"] lowercase__ = [] lowercase__ = True @classmethod def __lowerCAmelCase ( cls : List[Any] ,lowercase_ : Dict[str, Any] = None ,lowercase_ : Optional[str] = None ,lowercase_ : Optional[int]=False ,**lowercase_ : Any ,): lowerCAmelCase__ ,lowerCAmelCase__ : Dict = cls.load_config( pretrained_model_name_or_path=lowercase_ ,subfolder=lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ ,) lowerCAmelCase__ ,lowerCAmelCase__ : Union[str, Any] = cls.from_config(lowercase_ ,return_unused_kwargs=lowercase_ ,**lowercase_ ) if hasattr(lowercase_ ,'''create_state''' ) and getattr(lowercase_ ,'''has_state''' ,lowercase_ ): lowerCAmelCase__ : List[Any] = scheduler.create_state() if return_unused_kwargs: return scheduler, state, unused_kwargs return scheduler, state def __lowerCAmelCase ( self : Tuple ,lowercase_ : Union[str, os.PathLike] ,lowercase_ : bool = False ,**lowercase_ : str ): self.save_config(save_directory=lowercase_ ,push_to_hub=lowercase_ ,**lowercase_ ) @property def __lowerCAmelCase ( self : List[str] ): return self._get_compatibles() @classmethod def __lowerCAmelCase ( cls : List[Any] ): lowerCAmelCase__ : Tuple = list(set([cls.__name__] + cls._compatibles ) ) lowerCAmelCase__ : Tuple = importlib.import_module(__name__.split('''.''' )[0] ) lowerCAmelCase__ : Union[str, Any] = [ getattr(lowercase_ ,lowercase_ ) for c in compatible_classes_str if hasattr(lowercase_ ,lowercase_ ) ] return compatible_classes def __SCREAMING_SNAKE_CASE ( A_ , A_ ): assert len(A_ ) >= x.ndim return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(A_ ) - x.ndim) ) , A_ ) def __SCREAMING_SNAKE_CASE ( A_ , A_=0.999 , A_=jnp.floataa ): def alpha_bar(A_ ): return math.cos((time_step + 0.008) / 1.008 * math.pi / 2 ) ** 2 lowerCAmelCase__ : Optional[Any] = [] for i in range(A_ ): lowerCAmelCase__ : str = i / num_diffusion_timesteps lowerCAmelCase__ : Union[str, Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar(A_ ) / alpha_bar(A_ ) , A_ ) ) return jnp.array(A_ , dtype=A_ ) @flax.struct.dataclass class SCREAMING_SNAKE_CASE : """simple docstring""" lowercase__ = 42 lowercase__ = 42 lowercase__ = 42 @classmethod def __lowerCAmelCase ( cls : Union[str, Any] ,lowercase_ : List[Any] ): lowerCAmelCase__ : Optional[int] = scheduler.config if config.trained_betas is not None: lowerCAmelCase__ : Any = jnp.asarray(config.trained_betas ,dtype=scheduler.dtype ) elif config.beta_schedule == "linear": lowerCAmelCase__ : Union[str, Any] = jnp.linspace(config.beta_start ,config.beta_end ,config.num_train_timesteps ,dtype=scheduler.dtype ) elif config.beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
lowerCAmelCase__ : int = ( jnp.linspace( config.beta_start**0.5 ,config.beta_end**0.5 ,config.num_train_timesteps ,dtype=scheduler.dtype ) ** 2 ) elif config.beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowerCAmelCase__ : List[Any] = betas_for_alpha_bar(config.num_train_timesteps ,dtype=scheduler.dtype ) else: raise NotImplementedError( F'beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}' ) lowerCAmelCase__ : str = 1.0 - betas lowerCAmelCase__ : Union[str, Any] = jnp.cumprod(lowercase_ ,axis=0 ) return cls( alphas=lowercase_ ,betas=lowercase_ ,alphas_cumprod=lowercase_ ,) def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ : Any = state.alphas_cumprod lowerCAmelCase__ : Optional[Any] = alphas_cumprod[timesteps] ** 0.5 lowerCAmelCase__ : Tuple = sqrt_alpha_prod.flatten() lowerCAmelCase__ : str = broadcast_to_shape_from_left(A_ , original_samples.shape ) lowerCAmelCase__ : Optional[Any] = (1 - alphas_cumprod[timesteps]) ** 0.5 lowerCAmelCase__ : Optional[Any] = sqrt_one_minus_alpha_prod.flatten() lowerCAmelCase__ : Optional[int] = broadcast_to_shape_from_left(A_ , original_samples.shape ) return sqrt_alpha_prod, sqrt_one_minus_alpha_prod def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ ) lowerCAmelCase__ : str = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , A_ ): lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = get_sqrt_alpha_prod(A_ , A_ , A_ , A_ ) lowerCAmelCase__ : Union[str, Any] = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample return velocity
106
1
"""Dutch national flag sort: sorts a sequence containing only the three
values 0, 1 and 2, in place and in a single pass."""
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
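# Minimal usage sketch for dutch_national_flag_sort above: one pass over a
# mixed sequence of the three allowed values yields the sorted order.
assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]
assert dutch_national_flag_sort([]) == []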
371
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2 (Project Euler 36)."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
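# Worked example for the double-base palindrome check above: 585 reads the
# same forwards and backwards in both bases, since 585 = 0b1001001001.
assert bin(585) == "0b1001001001"
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])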
187
0
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Fetch the ids of the current top stories and resolve each id to its story dict."""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
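# Offline sketch of the markdown formatting used above, with a made-up story
# dict instead of a live HackerNews response (no network call is made).
example_stories = [{"title": "Example story", "url": "https://example.com"}]
assert "\n".join(
    "* [{title}]({url})".format(**story) for story in example_stories
) == "* [Example story](https://example.com)"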
42
import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient UpperCAmelCase = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN''']) def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = test_results.split(' ' ) lowercase = 0 lowercase = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. lowercase = expressions[-2] if '=' in expressions[-1] else expressions[-1] for i, expression in enumerate(__SCREAMING_SNAKE_CASE ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = {} lowercase = None lowercase = False for line in failures_short_lines.split('\n' ): if re.search(r'_ \[doctest\]' , __SCREAMING_SNAKE_CASE ): lowercase = True lowercase = line.split(' ' )[2] elif in_error and not line.split(' ' )[0].isdigit(): lowercase = line lowercase = False return failures class A_ : '''simple docstring''' def __init__( self , snake_case , snake_case ): lowercase = title lowercase = doc_test_results['time_spent'].split(',' )[0] lowercase = doc_test_results['success'] lowercase = doc_test_results['failures'] lowercase = self.n_success + self.n_failures # Failures and success of the modeling tests lowercase = doc_test_results @property def SCREAMING_SNAKE_CASE__ ( self ): lowercase = [self._time_spent] lowercase = 0 for time in time_spent: lowercase = time.split(':' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(snake_case ) == 1: lowercase = [0, 0, time_parts[0]] lowercase , lowercase , lowercase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 3600 + minutes * 60 + seconds lowercase , lowercase , lowercase = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60 return F'''{int(snake_case )}h{int(snake_case )}m{int(snake_case )}s''' @property def SCREAMING_SNAKE_CASE__ ( self ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def SCREAMING_SNAKE_CASE__ ( self ): return { "type": "section", "text": { "type": "plain_text", "text": F'''🌞 There were no failures: all {self.n_tests} tests passed. 
The suite ran in {self.time}.''', "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def SCREAMING_SNAKE_CASE__ ( self ): return { "type": "section", "text": { "type": "plain_text", "text": ( F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in''' F''' {self.time}.''' ), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } @property def SCREAMING_SNAKE_CASE__ ( self ): lowercase = 40 lowercase = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(snake_case , snake_case )} lowercase = '' for category, failures in category_failures.items(): if len(snake_case ) == 0: continue if report != "": report += "\n\n" report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(snake_case ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": F'''The following examples had failures:\n\n\n{report}\n''', }, } @property def SCREAMING_SNAKE_CASE__ ( self ): lowercase = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(snake_case ) @staticmethod def SCREAMING_SNAKE_CASE__ ( ): lowercase = [ { 'type': 'section', 'text': { 'type': 'plain_text', 'text': 'There was an issue running the tests.', }, 'accessory': { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''', }, } ] print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(snake_case )} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=snake_case , ) def SCREAMING_SNAKE_CASE__ ( self ): print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(self.payload )} ) ) lowercase = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else 'All tests passed.' lowercase = client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=snake_case , ) def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case ): lowercase = '' for key, value in failures.items(): lowercase = value[:200] + ' [Truncated]' if len(snake_case ) > 250 else value failures_text += F'''*{key}*\n_{value}_\n\n''' lowercase = job_name lowercase = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}} if job_link is not None: lowercase = { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def SCREAMING_SNAKE_CASE__ ( self ): if self.thread_ts is None: raise ValueError('Can only post reply if a post has been made.' 
        )

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)


def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failed"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure

                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
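The category routing in the summary loop relies on `fnmatch`, which matches a whole path against a shell-style glob (its `*` also crosses `/`), not a regex, and the first matching pattern wins. A standalone sketch of the same routing; the file names and the fallback label are illustrative, not taken from any real CI run:

import collections
from fnmatch import fnmatch

# Glob pattern -> report category, in priority order (first match wins).
docs = collections.OrderedDict([("*.py", "API Examples"), ("*.md", "MD Examples")])

def categorize(file_path: str) -> str:
    for file_regex, category in docs.items():
        if fnmatch(file_path, file_regex):
            return category
    return "Uncategorized"  # fallback label, ours for illustration

assert categorize("src/transformers/models/bert/modeling_bert.py") == "API Examples"
assert categorize("docs/source/en/quicktour.md") == "MD Examples"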
195
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
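The pattern above defers heavy framework imports until an attribute is actually requested, so importing the package stays cheap even when torch or flax is installed. A minimal sketch of the same idea using PEP 562's module-level __getattr__; the package and module names here are hypothetical, and transformers' _LazyModule is more involved than this:

# lazy_pkg/__init__.py -- minimal deferred-import sketch (hypothetical package)
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}

# Invert the structure: public attribute -> submodule that defines it.
_attr_to_module = {
    attr: module for module, attrs in _import_structure.items() for attr in attrs
}

def __getattr__(name):
    # Called only when `name` is not already defined on this module.
    if name in _attr_to_module:
        module = importlib.import_module(f".{_attr_to_module[name]}", __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

With this in place, `import lazy_pkg` does no real work; `from lazy_pkg import HeavyClass` triggers the submodule import on first access.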
371
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class LmSeqsDataset(Dataset):
    """Custom Dataset wrapping language modeling sequences.

    Each sample is retrieved by indexing the list of token_ids and their
    corresponding lengths.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Some sanity checks."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Sequences that are too long are split by chunks of max_model_input_size."""
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Too short sequences are simply removed. This could be tuned."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Remove sequences with a (too) high level of unknown tokens."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Print some statistics on the corpus. Only the master process."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')

        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Do the padding and transform into torch.tensor."""
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
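A minimal usage sketch for the dataset above, assuming it runs inside the distillation project (so that `from utils import logger` resolves); the `params` namespace and the token ids below are stand-ins, not the real training arguments:

from types import SimpleNamespace

import numpy as np
from torch.utils.data import DataLoader

params = SimpleNamespace(
    max_model_input_size=512,
    mlm=True,
    is_master=True,
    special_tok_ids={"cls_token": 101, "sep_token": 102, "pad_token": 0, "unk_token": 100},
)
# Each sequence must start with cls, end with sep, and be longer than 11 tokens
# to survive the filtering passes in __init__.
data = [np.array([101] + [5] * 20 + [102]) for _ in range(8)]

dataset = LmSeqsDataset(params, data)
loader = DataLoader(dataset, batch_size=4, collate_fn=dataset.batch_sequences)
token_ids, lengths = next(iter(loader))  # token_ids: (bs, max_seq_len_), lengths: (bs,)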
266
0
import math


class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]


if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11: 1 -> 3 (5) then 3 -> 4 (6)
    print(graph.show_min(0, 3))  # 16: 0 -> 2 (9) then 2 -> 3 (7)
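The triple loop realizes the Floyd-Warshall recurrence dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j]) over allowed intermediate vertices k, in O(n^3) time. A quick hand-check on a smaller graph:

# 0 -> 3 has no direct edge; the cheapest route is 0 -> 2 (9) then 2 -> 3 (7).
g = Graph(4)
g.add_edge(0, 2, 9)
g.add_edge(2, 3, 7)
g.floyd_warshall()
assert g.show_min(0, 3) == 16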
131
from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, ) @flax.struct.dataclass class __lowerCAmelCase ( a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = 42 class __lowerCAmelCase ( nn.Module ): """simple docstring""" _SCREAMING_SNAKE_CASE = 42 _SCREAMING_SNAKE_CASE = (16, 32, 96, 256) _SCREAMING_SNAKE_CASE = jnp.floataa def lowerCAmelCase__ ( self : Optional[Any] ) -> Dict: """simple docstring""" snake_case_ = nn.Conv( self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) snake_case_ = [] for i in range(len(self.block_out_channels ) - 1 ): snake_case_ = self.block_out_channels[i] snake_case_ = self.block_out_channels[i + 1] snake_case_ = nn.Conv( _lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_lowerCAmelCase ) snake_case_ = nn.Conv( _lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) blocks.append(_lowerCAmelCase ) snake_case_ = blocks snake_case_ = nn.Conv( self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : Union[str, Any] , _lowerCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" snake_case_ = self.conv_in(_lowerCAmelCase ) snake_case_ = nn.silu(_lowerCAmelCase ) for block in self.blocks: snake_case_ = block(_lowerCAmelCase ) snake_case_ = nn.silu(_lowerCAmelCase ) snake_case_ = self.conv_out(_lowerCAmelCase ) return embedding @flax_register_to_config class __lowerCAmelCase ( nn.Module , a , a ): """simple docstring""" _SCREAMING_SNAKE_CASE = 32 _SCREAMING_SNAKE_CASE = 4 _SCREAMING_SNAKE_CASE = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = (320, 640, 1280, 1280) _SCREAMING_SNAKE_CASE = 2 _SCREAMING_SNAKE_CASE = 8 _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = 1280 _SCREAMING_SNAKE_CASE = 0.0 _SCREAMING_SNAKE_CASE = False _SCREAMING_SNAKE_CASE = jnp.floataa _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = "rgb" _SCREAMING_SNAKE_CASE = (16, 32, 96, 256) def lowerCAmelCase__ ( self : str , _lowerCAmelCase : jax.random.KeyArray ) -> FrozenDict: """simple docstring""" # init input tensors snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size) snake_case_ = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa ) snake_case_ = jnp.ones((1,) , dtype=jnp.intaa ) snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) snake_case_ = (1, 3, self.sample_size * 8, self.sample_size * 8) snake_case_ = jnp.zeros(_lowerCAmelCase , dtype=jnp.floataa ) snake_case_ , snake_case_ = jax.random.split(_lowerCAmelCase ) snake_case_ = {"params": params_rng, "dropout": dropout_rng} return self.init(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )["params"] def lowerCAmelCase__ ( self : List[Any] ) -> Any: """simple docstring""" 
snake_case_ = self.block_out_channels snake_case_ = block_out_channels[0] * 4 # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. snake_case_ = self.num_attention_heads or self.attention_head_dim # input snake_case_ = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time snake_case_ = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) snake_case_ = FlaxTimestepEmbedding(_lowerCAmelCase , dtype=self.dtype ) snake_case_ = FlaxControlNetConditioningEmbedding( conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , ) snake_case_ = self.only_cross_attention if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case_ = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case_ = (num_attention_heads,) * len(self.down_block_types ) # down snake_case_ = [] snake_case_ = [] snake_case_ = block_out_channels[0] snake_case_ = nn.Conv( _lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_lowerCAmelCase ) for i, down_block_type in enumerate(self.down_block_types ): snake_case_ = output_channel snake_case_ = block_out_channels[i] snake_case_ = i == len(_lowerCAmelCase ) - 1 if down_block_type == "CrossAttnDownBlock2D": snake_case_ = FlaxCrossAttnDownBlockaD( in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , ) else: snake_case_ = FlaxDownBlockaD( in_channels=_lowerCAmelCase , out_channels=_lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_lowerCAmelCase ) for _ in range(self.layers_per_block ): snake_case_ = nn.Conv( _lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_lowerCAmelCase ) if not is_final_block: snake_case_ = nn.Conv( _lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) controlnet_down_blocks.append(_lowerCAmelCase ) snake_case_ = down_blocks snake_case_ = controlnet_down_blocks # mid snake_case_ = block_out_channels[-1] snake_case_ = FlaxUNetMidBlockaDCrossAttn( in_channels=_lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , ) snake_case_ = nn.Conv( 
_lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , ) def __call__( self : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : float = 1.0 , _lowerCAmelCase : bool = True , _lowerCAmelCase : bool = False , ) -> Union[FlaxControlNetOutput, Tuple]: """simple docstring""" snake_case_ = self.controlnet_conditioning_channel_order if channel_order == "bgr": snake_case_ = jnp.flip(_lowerCAmelCase , axis=1 ) # 1. time if not isinstance(_lowerCAmelCase , jnp.ndarray ): snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0: snake_case_ = timesteps.astype(dtype=jnp.floataa ) snake_case_ = jnp.expand_dims(_lowerCAmelCase , 0 ) snake_case_ = self.time_proj(_lowerCAmelCase ) snake_case_ = self.time_embedding(_lowerCAmelCase ) # 2. pre-process snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) ) snake_case_ = self.conv_in(_lowerCAmelCase ) snake_case_ = jnp.transpose(_lowerCAmelCase , (0, 2, 3, 1) ) snake_case_ = self.controlnet_cond_embedding(_lowerCAmelCase ) sample += controlnet_cond # 3. down snake_case_ = (sample,) for down_block in self.down_blocks: if isinstance(_lowerCAmelCase , _lowerCAmelCase ): snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train ) else: snake_case_ , snake_case_ = down_block(_lowerCAmelCase , _lowerCAmelCase , deterministic=not train ) down_block_res_samples += res_samples # 4. mid snake_case_ = self.mid_block(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , deterministic=not train ) # 5. contronet blocks snake_case_ = () for down_block_res_sample, controlnet_block in zip(_lowerCAmelCase , self.controlnet_down_blocks ): snake_case_ = controlnet_block(_lowerCAmelCase ) controlnet_down_block_res_samples += (down_block_res_sample,) snake_case_ = controlnet_down_block_res_samples snake_case_ = self.controlnet_mid_block(_lowerCAmelCase ) # 6. scaling snake_case_ = [sample * conditioning_scale for sample in down_block_res_samples] mid_block_res_sample *= conditioning_scale if not return_dict: return (down_block_res_samples, mid_block_res_sample) return FlaxControlNetOutput( down_block_res_samples=_lowerCAmelCase , mid_block_res_sample=_lowerCAmelCase )
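A hedged loading sketch for the Flax ControlNet defined above. The public class name FlaxControlNetModel is assumed (the mangled name in this fragment hides it), and the checkpoint id is illustrative of the documented diffusers usage, where Flax from_pretrained returns the module plus its params pytree:

import jax.numpy as jnp

controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
)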
159
0
"""simple docstring""" import bza import gzip import lzma import os import shutil import struct import tarfile import warnings import zipfile from abc import ABC, abstractmethod from pathlib import Path from typing import Dict, List, Optional, Type, Union from .. import config from .filelock import FileLock from .logging import get_logger _snake_case = get_logger(__name__) class UpperCamelCase : def __init__( self : Tuple , UpperCAmelCase__ : Optional[str] = None ) -> Dict: _a : Dict = ( os.path.join(UpperCAmelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH ) _a : Any = Extractor def _lowercase ( self : Optional[Any] , UpperCAmelCase__ : str ) -> str: from .file_utils import hash_url_to_filename # Path where we extract compressed archives # We extract in the cache dir, and get the extracted path name by hashing the original path" _a : Union[str, Any] = os.path.abspath(UpperCAmelCase__ ) return os.path.join(self.extract_dir , hash_url_to_filename(UpperCAmelCase__ ) ) def _lowercase ( self : int , UpperCAmelCase__ : str , UpperCAmelCase__ : bool ) -> bool: return force_extract or ( not os.path.isfile(UpperCAmelCase__ ) and not (os.path.isdir(UpperCAmelCase__ ) and os.listdir(UpperCAmelCase__ )) ) def _lowercase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : bool = False ) -> str: _a : Optional[Any] = self.extractor.infer_extractor_format(UpperCAmelCase__ ) if not extractor_format: return input_path _a : Tuple = self._get_output_path(UpperCAmelCase__ ) if self._do_extract(UpperCAmelCase__ , UpperCAmelCase__ ): self.extractor.extract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) return output_path class UpperCamelCase ( snake_case_ ): @classmethod @abstractmethod def _lowercase ( cls : Any , UpperCAmelCase__ : Union[Path, str] , **UpperCAmelCase__ : List[Any] ) -> bool: ... @staticmethod @abstractmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: ... 
class UpperCamelCase ( snake_case_ , snake_case_ ): UpperCamelCase : List[bytes] = [] @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : int ) -> Union[str, Any]: with open(UpperCAmelCase__ , """rb""" ) as f: return f.read(UpperCAmelCase__ ) @classmethod def _lowercase ( cls : Dict , UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : bytes = b"" ) -> bool: if not magic_number: _a : Union[str, Any] = max(len(UpperCAmelCase__ ) for cls_magic_number in cls.magic_numbers ) try: _a : List[Any] = cls.read_magic_number(UpperCAmelCase__ , UpperCAmelCase__ ) except OSError: return False return any(magic_number.startswith(UpperCAmelCase__ ) for cls_magic_number in cls.magic_numbers ) class UpperCamelCase ( snake_case_ ): @classmethod def _lowercase ( cls : List[str] , UpperCAmelCase__ : Union[Path, str] , **UpperCAmelCase__ : Dict ) -> bool: return tarfile.is_tarfile(UpperCAmelCase__ ) @staticmethod def _lowercase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] ) -> str: def resolved(UpperCAmelCase__ : str ) -> str: return os.path.realpath(os.path.abspath(UpperCAmelCase__ ) ) def badpath(UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> bool: # joinpath will ignore base if path is absolute return not resolved(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ).startswith(UpperCAmelCase__ ) def badlink(UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str ) -> bool: # Links are interpreted relative to the directory containing the link _a : str = resolved(os.path.join(UpperCAmelCase__ , os.path.dirname(info.name ) ) ) return badpath(info.linkname , base=UpperCAmelCase__ ) _a : Tuple = resolved(UpperCAmelCase__ ) for finfo in members: if badpath(finfo.name , UpperCAmelCase__ ): logger.error(f"""Extraction of {finfo.name} is blocked (illegal path)""" ) elif finfo.issym() and badlink(UpperCAmelCase__ , UpperCAmelCase__ ): logger.error(f"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" ) elif finfo.islnk() and badlink(UpperCAmelCase__ , UpperCAmelCase__ ): logger.error(f"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" ) else: yield finfo @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) _a : int = tarfile.open(UpperCAmelCase__ ) tar_file.extractall(UpperCAmelCase__ , members=TarExtractor.safemembers(UpperCAmelCase__ , UpperCAmelCase__ ) ) tar_file.close() class UpperCamelCase ( snake_case_ ): UpperCamelCase : Any = [b'''\x1F\x8B'''] @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: with gzip.open(UpperCAmelCase__ , """rb""" ) as gzip_file: with open(UpperCAmelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCAmelCase__ , UpperCAmelCase__ ) class UpperCamelCase ( snake_case_ ): UpperCamelCase : Any = [ b'''PK\x03\x04''', b'''PK\x05\x06''', # empty archive b'''PK\x07\x08''', # spanned archive ] @classmethod def _lowercase ( cls : Any , UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : bytes = b"" ) -> bool: if super().is_extractable(UpperCAmelCase__ , magic_number=UpperCAmelCase__ ): return True try: # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives. 
# From: https://github.com/python/cpython/pull/5053 from zipfile import ( _CD_SIGNATURE, _ECD_DISK_NUMBER, _ECD_DISK_START, _ECD_ENTRIES_TOTAL, _ECD_OFFSET, _ECD_SIZE, _EndRecData, sizeCentralDir, stringCentralDir, structCentralDir, ) with open(UpperCAmelCase__ , """rb""" ) as fp: _a : Optional[Any] = _EndRecData(UpperCAmelCase__ ) if endrec: if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0: return True # Empty zipfiles are still zipfiles elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]: fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir: _a : str = fp.read(UpperCAmelCase__ ) # CD is where we expect it to be if len(UpperCAmelCase__ ) == sizeCentralDir: _a : str = struct.unpack(UpperCAmelCase__ , UpperCAmelCase__ ) # CD is the right size if centdir[_CD_SIGNATURE] == stringCentralDir: return True # First central directory entry has correct magic number return False except Exception: # catch all errors in case future python versions change the zipfile internals return False @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) with zipfile.ZipFile(UpperCAmelCase__ , """r""" ) as zip_file: zip_file.extractall(UpperCAmelCase__ ) zip_file.close() class UpperCamelCase ( snake_case_ ): UpperCamelCase : Any = [b'''\xFD\x37\x7A\x58\x5A\x00'''] @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: with lzma.open(UpperCAmelCase__ ) as compressed_file: with open(UpperCAmelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCAmelCase__ , UpperCAmelCase__ ) class UpperCamelCase ( snake_case_ ): UpperCamelCase : List[Any] = [b'''Rar!\x1a\x07\x00''', b'''Rar!\x1a\x07\x01\x00'''] # RAR_ID # RAR5_ID @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: if not config.RARFILE_AVAILABLE: raise ImportError("""Please pip install rarfile""" ) import rarfile os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ ) _a : Optional[int] = rarfile.RarFile(UpperCAmelCase__ ) rf.extractall(UpperCAmelCase__ ) rf.close() class UpperCamelCase ( snake_case_ ): UpperCamelCase : Optional[int] = [b'''\x28\xb5\x2F\xFD'''] @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: if not config.ZSTANDARD_AVAILABLE: raise ImportError("""Please pip install zstandard""" ) import zstandard as zstd _a : Dict = zstd.ZstdDecompressor() with open(UpperCAmelCase__ , """rb""" ) as ifh, open(UpperCAmelCase__ , """wb""" ) as ofh: dctx.copy_stream(UpperCAmelCase__ , UpperCAmelCase__ ) class UpperCamelCase ( snake_case_ ): UpperCamelCase : List[str] = [b'''\x42\x5A\x68'''] @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: with bza.open(UpperCAmelCase__ , """rb""" ) as compressed_file: with open(UpperCAmelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCAmelCase__ , UpperCAmelCase__ ) class UpperCamelCase ( snake_case_ ): UpperCamelCase : Any = [b'''\x37\x7A\xBC\xAF\x27\x1C'''] @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: if not config.PY7ZR_AVAILABLE: raise ImportError("""Please pip install py7zr""" ) import pyazr os.makedirs(UpperCAmelCase__ , 
exist_ok=UpperCAmelCase__ ) with pyazr.SevenZipFile(UpperCAmelCase__ , """r""" ) as archive: archive.extractall(UpperCAmelCase__ ) class UpperCamelCase ( snake_case_ ): UpperCamelCase : Union[str, Any] = [b'''\x04\x22\x4D\x18'''] @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] ) -> None: if not config.LZ4_AVAILABLE: raise ImportError("""Please pip install lz4""" ) import lza.frame with lza.frame.open(UpperCAmelCase__ , """rb""" ) as compressed_file: with open(UpperCAmelCase__ , """wb""" ) as extracted_file: shutil.copyfileobj(UpperCAmelCase__ , UpperCAmelCase__ ) class UpperCamelCase : # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip) UpperCamelCase : Dict[str, Type[BaseExtractor]] = { "tar": TarExtractor, "gzip": GzipExtractor, "zip": ZipExtractor, "xz": XzExtractor, "rar": RarExtractor, "zstd": ZstdExtractor, "bz2": BzipaExtractor, "7z": SevenZipExtractor, # <Added version="2.4.0"/> "lz4": LzaExtractor, # <Added version="2.4.0"/> } @classmethod def _lowercase ( cls : Dict ) -> Union[str, Any]: return max( len(UpperCAmelCase__ ) for extractor in cls.extractors.values() if issubclass(UpperCAmelCase__ , UpperCAmelCase__ ) for extractor_magic_number in extractor.magic_numbers ) @staticmethod def _lowercase ( UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : int ) -> Union[str, Any]: try: return MagicNumberBaseExtractor.read_magic_number(UpperCAmelCase__ , magic_number_length=UpperCAmelCase__ ) except OSError: return b"" @classmethod def _lowercase ( cls : Any , UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : bool = False ) -> bool: warnings.warn( """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """ """Use 'infer_extractor_format' instead.""" , category=UpperCAmelCase__ , ) _a : Union[str, Any] = cls.infer_extractor_format(UpperCAmelCase__ ) if extractor_format: return True if not return_extractor else (True, cls.extractors[extractor_format]) return False if not return_extractor else (False, None) @classmethod def _lowercase ( cls : str , UpperCAmelCase__ : Union[Path, str] ) -> str: # <Added version="2.4.0"/> _a : Optional[int] = cls._get_magic_number_max_length() _a : List[Any] = cls._read_magic_number(UpperCAmelCase__ , UpperCAmelCase__ ) for extractor_format, extractor in cls.extractors.items(): if extractor.is_extractable(UpperCAmelCase__ , magic_number=UpperCAmelCase__ ): return extractor_format @classmethod def _lowercase ( cls : str , UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Union[Path, str] , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[BaseExtractor] = "deprecated" , ) -> None: os.makedirs(os.path.dirname(UpperCAmelCase__ ) , exist_ok=UpperCAmelCase__ ) # Prevent parallel extractions _a : Any = str(Path(UpperCAmelCase__ ).with_suffix(""".lock""" ) ) with FileLock(UpperCAmelCase__ ): shutil.rmtree(UpperCAmelCase__ , ignore_errors=UpperCAmelCase__ ) if extractor_format or extractor != "deprecated": if extractor != "deprecated" or not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ): # passed as positional arg warnings.warn( """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. 
""" """Use 'extractor_format' instead.""" , category=UpperCAmelCase__ , ) _a : Optional[int] = extractor if extractor != """deprecated""" else extractor_format else: _a : Optional[Any] = cls.extractors[extractor_format] return extractor.extract(UpperCAmelCase__ , UpperCAmelCase__ ) else: warnings.warn( """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """ """exception in 3.0.0.""" , category=UpperCAmelCase__ , ) for extractor in cls.extractors.values(): if extractor.is_extractable(UpperCAmelCase__ ): return extractor.extract(UpperCAmelCase__ , UpperCAmelCase__ )
324
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _snake_case = { 'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig'] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['VisionEncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['TFVisionEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _snake_case = ['FlaxVisionEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
324
1
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case : def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=24 , _lowerCamelCase=2 , _lowerCamelCase=6 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=None , _lowerCamelCase=1000 , ): UpperCAmelCase__ : str = parent UpperCAmelCase__ : Union[str, Any] = batch_size UpperCAmelCase__ : Any = seq_length UpperCAmelCase__ : Tuple = is_training UpperCAmelCase__ : Dict = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Tuple = use_labels UpperCAmelCase__ : int = vocab_size UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : Union[str, Any] = intermediate_size UpperCAmelCase__ : Optional[int] = hidden_act UpperCAmelCase__ : str = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Tuple = type_vocab_size UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : Any = num_labels UpperCAmelCase__ : Dict = scope UpperCAmelCase__ : int = range_bbox def snake_case__ ( self): UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox) # Ensure that bbox is legal for i in range(bbox.shape[0]): for j in range(bbox.shape[1]): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCAmelCase__ : List[Any] = bbox[i, j, 3] UpperCAmelCase__ : str = bbox[i, j, 1] UpperCAmelCase__ : List[str] = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCAmelCase__ : Union[str, Any] = bbox[i, j, 2] UpperCAmelCase__ : List[Any] = bbox[i, j, 0] UpperCAmelCase__ : Optional[int] = t UpperCAmelCase__ : Any = None if self.use_input_mask: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2) UpperCAmelCase__ : Optional[Any] = None if self.use_token_type_ids: UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Dict = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size) UpperCAmelCase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) UpperCAmelCase__ : Any = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def snake_case__ ( self): return LiltConfig( vocab_size=self.vocab_size , 
hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): UpperCAmelCase__ : Any = LiltModel(config=_lowerCamelCase) model.to(_lowerCamelCase) model.eval() UpperCAmelCase__ : List[Any] = model(_lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase) UpperCAmelCase__ : Any = model(_lowerCamelCase , bbox=_lowerCamelCase , token_type_ids=_lowerCamelCase) UpperCAmelCase__ : List[str] = model(_lowerCamelCase , bbox=_lowerCamelCase) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): UpperCAmelCase__ : List[Any] = self.num_labels UpperCAmelCase__ : str = LiltForTokenClassification(config=_lowerCamelCase) model.to(_lowerCamelCase) model.eval() UpperCAmelCase__ : Any = model( _lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , labels=_lowerCamelCase) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ): UpperCAmelCase__ : Optional[int] = LiltForQuestionAnswering(config=_lowerCamelCase) model.to(_lowerCamelCase) model.eval() UpperCAmelCase__ : Any = model( _lowerCamelCase , bbox=_lowerCamelCase , attention_mask=_lowerCamelCase , token_type_ids=_lowerCamelCase , start_positions=_lowerCamelCase , end_positions=_lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def snake_case__ ( self): UpperCAmelCase__ : Dict = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : List[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = { """input_ids""": input_ids, """bbox""": bbox, """token_type_ids""": token_type_ids, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class _snake_case ( a__ , a__ , a__ , unittest.TestCase ): lowerCAmelCase :Optional[Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) lowerCAmelCase :Optional[int] = ( { '''feature-extraction''': LiltModel, '''question-answering''': LiltForQuestionAnswering, '''text-classification''': LiltForSequenceClassification, '''token-classification''': LiltForTokenClassification, '''zero-shot''': LiltForSequenceClassification, } if is_torch_available() else {} ) lowerCAmelCase :Union[str, Any] = False lowerCAmelCase :int = 
False def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase): return True def snake_case__ ( self): UpperCAmelCase__ : Tuple = LiltModelTester(self) UpperCAmelCase__ : Optional[int] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=37) def snake_case__ ( self): self.config_tester.run_common_tests() def snake_case__ ( self): UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCamelCase) def snake_case__ ( self): UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : List[str] = type self.model_tester.create_and_check_model(*_lowerCamelCase) def snake_case__ ( self): UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_lowerCamelCase) def snake_case__ ( self): UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCamelCase) @slow def snake_case__ ( self): for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = LiltModel.from_pretrained(_lowerCamelCase) self.assertIsNotNone(_lowerCamelCase) @require_torch @slow class _snake_case ( unittest.TestCase ): def snake_case__ ( self): UpperCAmelCase__ : Any = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""").to(_lowerCamelCase) UpperCAmelCase__ : Tuple = torch.tensor([[1, 2]] , device=_lowerCamelCase) UpperCAmelCase__ : Optional[Any] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_lowerCamelCase) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[Any] = model(input_ids=_lowerCamelCase , bbox=_lowerCamelCase) UpperCAmelCase__ : Union[str, Any] = torch.Size([1, 2, 768]) UpperCAmelCase__ : List[str] = torch.tensor( [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_lowerCamelCase , ) self.assertTrue(outputs.last_hidden_state.shape , _lowerCamelCase) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _lowerCamelCase , atol=1e-3))
163
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
163
1
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits in num!."""
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
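A quick worked check of the digit-sum logic above: 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27:

assert solution(10) == 27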
290
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , ) @pytest.mark.usefixtures('''sm_env''' ) @parameterized_class( [ { '''framework''': '''pytorch''', '''script''': '''run_glue.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9}, }, { '''framework''': '''tensorflow''', '''script''': '''run_tf.py''', '''model_name_or_path''': '''distilbert-base-cased''', '''instance_type''': '''ml.g4dn.xlarge''', '''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9}, }, ] ) class __lowerCAmelCase ( unittest.TestCase ): def _lowerCamelCase ( self : List[Any]) -> Any: """simple docstring""" if self.framework == "pytorch": subprocess.run( F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='utf-8' , check=A , ) assert hasattr(self , 'env') def _lowerCamelCase ( self : Any , A : Tuple=1) -> List[str]: """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-single" , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='py36' , ) def _lowerCamelCase ( self : Dict , A : int) -> str: """simple docstring""" TrainingJobAnalytics(A).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv") def _lowerCamelCase ( self : Union[str, Any]) -> Dict: """simple docstring""" _UpperCAmelCase = self.create_estimator() # run training estimator.fit() # result dataframe _UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value']) _UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value']) # get train time from SageMaker job, this includes starting, preprocessing, stopping _UpperCAmelCase = ( Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds' , 99_99_99) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy) assert all(t <= self.results['eval_loss'] for t in eval_loss) # dump tests result into json file to share in PR with open(F"{estimator.latest_training_job.name}.json" , 'w') as outfile: json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} , A)
290
1
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Any = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Dict = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" ) UpperCAmelCase : str = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] ) UpperCAmelCase : Optional[Any] = text_classifier("""This is great !""" , top_k=2 ) self.assertEqual( nested_simplify(A_ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}] ) UpperCAmelCase : str = text_classifier(["""This is great !""", """This is bad"""] , top_k=2 ) self.assertEqual( nested_simplify(A_ ) , [ [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}], [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}], ] , ) UpperCAmelCase : str = text_classifier("""This is great !""" , top_k=1 ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] ) # Legacy behavior UpperCAmelCase : str = text_classifier("""This is great !""" , return_all_scores=A_ ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] ) UpperCAmelCase : Any = text_classifier("""This is great !""" , return_all_scores=A_ ) self.assertEqual( nested_simplify(A_ ) , [[{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}]] ) UpperCAmelCase : Optional[Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=A_ ) self.assertEqual( nested_simplify(A_ ) , [ [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}], [{"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_1""", """score""": 0.4_9_6}], ] , ) UpperCAmelCase : Union[str, Any] = text_classifier(["""This is great !""", """Something else"""] , return_all_scores=A_ ) self.assertEqual( nested_simplify(A_ ) , [ {"""label""": """LABEL_0""", """score""": 0.5_0_4}, {"""label""": """LABEL_0""", """score""": 0.5_0_4}, ] , ) @require_torch def _lowercase( self ) -> Optional[Any]: import torch UpperCAmelCase : Tuple = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""pt""" , device=torch.device("""cpu""" ) , ) UpperCAmelCase : str = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] ) 
@require_tf def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = pipeline( task="""text-classification""" , model="""hf-internal-testing/tiny-random-distilbert""" , framework="""tf""" ) UpperCAmelCase : Tuple = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """LABEL_0""", """score""": 0.5_0_4}] ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = pipeline("""text-classification""" ) UpperCAmelCase : Any = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) UpperCAmelCase : Dict = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) UpperCAmelCase : List[Any] = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] ) @slow @require_tf def _lowercase( self ) -> Tuple: UpperCAmelCase : int = pipeline("""text-classification""" , framework="""tf""" ) UpperCAmelCase : Dict = text_classifier("""This is great !""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """POSITIVE""", """score""": 1.0}] ) UpperCAmelCase : Dict = text_classifier("""This is bad !""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """NEGATIVE""", """score""": 1.0}] ) UpperCAmelCase : Optional[int] = text_classifier("""Birds are a type of animal""" ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": """POSITIVE""", """score""": 0.9_8_8}] ) def _lowercase( self , A , A , A ) -> Optional[Any]: UpperCAmelCase : Optional[int] = TextClassificationPipeline(model=A_ , tokenizer=A_ ) return text_classifier, ["HuggingFace is in", "This is another test"] def _lowercase( self , A , A ) -> str: UpperCAmelCase : Union[str, Any] = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 UpperCAmelCase : Optional[int] = "HuggingFace is in" UpperCAmelCase : List[Any] = text_classifier(A_ ) self.assertEqual(nested_simplify(A_ ) , [{"""label""": ANY(A_ ), """score""": ANY(A_ )}] ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) UpperCAmelCase : Union[str, Any] = ["HuggingFace is in ", "Paris is in France"] UpperCAmelCase : Optional[int] = text_classifier(A_ ) self.assertEqual( nested_simplify(A_ ) , [{"""label""": ANY(A_ ), """score""": ANY(A_ )}, {"""label""": ANY(A_ ), """score""": ANY(A_ )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() ) self.assertTrue(outputs[1]["""label"""] in model.config.idalabel.values() ) # Forcing to get all results with `top_k=None` # This is NOT the legacy format UpperCAmelCase : List[str] = text_classifier(A_ , top_k=A_ ) UpperCAmelCase : int = len(model.config.idalabel.values() ) self.assertEqual( nested_simplify(A_ ) , [[{"""label""": ANY(A_ ), """score""": ANY(A_ )}] * N, [{"""label""": ANY(A_ ), """score""": ANY(A_ )}] * N] , ) UpperCAmelCase : Optional[int] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} UpperCAmelCase : List[Any] = text_classifier(A_ ) self.assertEqual( nested_simplify(A_ ) , {"""label""": ANY(A_ ), """score""": ANY(A_ )} , ) self.assertTrue(outputs["""label"""] in model.config.idalabel.values() ) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this 
usage instead as it was outputting wrong outputs. UpperCAmelCase : int = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(A_ ): text_classifier(A_ ) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility UpperCAmelCase : Any = text_classifier([[["""HuggingFace is in """, """Paris is in France"""]]] ) self.assertEqual( nested_simplify(A_ ) , [{"""label""": ANY(A_ ), """score""": ANY(A_ )}] , ) self.assertTrue(outputs[0]["""label"""] in model.config.idalabel.values() )
265
import functools


def min_distance_up_bottom(worda: str, wordb: str) -> int:
    """
    >>> min_distance_up_bottom("intention", "execution")
    5
    >>> min_distance_up_bottom("intention", "")
    9
    >>> min_distance_up_bottom("", "")
    0
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - delete all from the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all from the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        diff = int(worda[indexa] != wordb[indexb])  # current letters not identical
        return min(
            1 + min_distance(indexa + 1, indexb),
            1 + min_distance(indexa, indexb + 1),
            diff + min_distance(indexa + 1, indexb + 1),
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
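Usage check for the memoized recursion above; the classic example needs three edits:

# "kitten" -> "sitting": substitute k->s, substitute e->i, insert g.
assert min_distance_up_bottom("kitten", "sitting") == 3
assert min_distance_up_bottom("", "abc") == 3  # pure insertions
assert min_distance_up_bottom("same", "same") == 0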
52
0
from __future__ import annotations


def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
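Note the slices above copy O(n) elements per recursion level. The stdlib bisect module gives the same membership test on indices without copying, which is the usual idiom:

from bisect import bisect_left

def binary_search_bisect(a_list: list[int], item: int) -> bool:
    i = bisect_left(a_list, item)  # leftmost insertion point for item
    return i < len(a_list) and a_list[i] == item

assert binary_search_bisect([1, 3, 5, 7], 5)
assert not binary_search_bisect([1, 3, 5, 7], 4)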
362
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_instructblip": [
        "INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InstructBlipConfig",
        "InstructBlipQFormerConfig",
        "InstructBlipVisionConfig",
    ],
    "processing_instructblip": ["InstructBlipProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_instructblip"] = [
        "INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InstructBlipQFormerModel",
        "InstructBlipPreTrainedModel",
        "InstructBlipForConditionalGeneration",
        "InstructBlipVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_instructblip import (
        INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        InstructBlipConfig,
        InstructBlipQFormerConfig,
        InstructBlipVisionConfig,
    )
    from .processing_instructblip import InstructBlipProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_instructblip import (
            INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            InstructBlipForConditionalGeneration,
            InstructBlipPreTrainedModel,
            InstructBlipQFormerModel,
            InstructBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
33
0
"""simple docstring""" from math import sqrt def a__ ( lowerCAmelCase = 1_00_00_00 ) -> List[str]: UpperCAmelCase__ : int = 0 UpperCAmelCase__ : int = 0 UpperCAmelCase__ : int while num_cuboids <= limit: max_cuboid_size += 1 for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ): if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer(): num_cuboids += ( min(lowercase__ , sum_shortest_sides // 2 ) - max(1 , sum_shortest_sides - max_cuboid_size ) + 1 ) return max_cuboid_size if __name__ == "__main__": print(f'''{solution() = }''')
171
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __A = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( lowerCamelCase_ ): a__ : Union[str, Any] = ["""pixel_values"""] def __init__( self , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = 1 / 255 , SCREAMING_SNAKE_CASE = True , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = True , **SCREAMING_SNAKE_CASE , ): """simple docstring""" super().__init__(**SCREAMING_SNAKE_CASE ) snake_case : int = size if size is not None else {"shortest_edge": 224} snake_case : int = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) snake_case : List[str] = crop_size if crop_size is not None else {"height": 224, "width": 224} snake_case : Tuple = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE , param_name="crop_size" ) snake_case : Dict = do_resize snake_case : Optional[int] = size snake_case : int = resample snake_case : Union[str, Any] = do_center_crop snake_case : Dict = crop_size snake_case : Dict = do_rescale snake_case : Any = rescale_factor snake_case : Tuple = do_normalize snake_case : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN snake_case : Tuple = image_std if image_std is not None else OPENAI_CLIP_STD snake_case : Tuple = do_convert_rgb def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ): """simple docstring""" snake_case : List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) snake_case : Dict = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size["shortest_edge"] , default_to_square=SCREAMING_SNAKE_CASE ) return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ): """simple docstring""" snake_case : Tuple = get_size_dict(SCREAMING_SNAKE_CASE ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(SCREAMING_SNAKE_CASE , size=(size["height"], size["width"]) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ): """simple docstring""" return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ): """simple docstring""" return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) def lowerCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE , ): """simple docstring""" snake_case : int = do_resize if do_resize is not None else self.do_resize snake_case : List[str] = size if size is not None else self.size snake_case : Dict = get_size_dict(SCREAMING_SNAKE_CASE , param_name="size" , default_to_square=SCREAMING_SNAKE_CASE ) snake_case : Optional[Any] = resample if resample is not None else self.resample snake_case : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case : Optional[int] = crop_size if crop_size is not None else self.crop_size snake_case : Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , param_name="crop_size" , default_to_square=SCREAMING_SNAKE_CASE ) snake_case : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale snake_case : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize snake_case : List[str] = image_mean if image_mean is not None else self.image_mean snake_case : Optional[int] = image_std if image_std is not None else self.image_std snake_case : Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE ) if not valid_images(SCREAMING_SNAKE_CASE ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case : Optional[int] = [convert_to_rgb(SCREAMING_SNAKE_CASE ) for image in images] # All transformations expect numpy arrays. 
snake_case : List[str] = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images] if do_resize: snake_case : Optional[Any] = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images] if do_center_crop: snake_case : int = [self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images] if do_rescale: snake_case : str = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images] if do_normalize: snake_case : Optional[int] = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images] snake_case : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images] snake_case : Tuple = {"pixel_values": images} return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
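The defaults above (OPENAI_CLIP_MEAN/STD, 224-pixel shortest edge and crop) match transformers' CLIPImageProcessor, so a minimal usage sketch would look like this; the dummy image and checkpoint name are illustrative only:

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((256, 320, 3), dtype=np.uint8))  # dummy RGB image
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)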
148
0
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed wrappers (DDP, DataParallel,
    DeepSpeed, torch.compile), optionally stripping the mixed-precision
    forward wrapper as well."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point, making sure all processes have reached it
    before continuing."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save `obj` to `f` once per machine (via `xm.save` on TPU)."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Temporarily set (upper-cased) environment variables inside a `with` block."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a readable name for a class, function, or instance."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether something is already listening on `port` (default 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
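A quick usage sketch for two of the helpers above (my own addition, assuming the restored names are right; the final assertion presumes MASTER_PORT was unset beforehand):

import os

# Temporarily inject environment variables; keys are upper-cased.
with patch_environment(master_addr="127.0.0.1", master_port="29501"):
    assert os.environ["MASTER_PORT"] == "29501"
assert "MASTER_PORT" not in os.environ  # cleaned up on exit

# Probe whether the default rendezvous port (29500) is already taken.
print(is_port_in_use())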
354
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch __UpperCamelCase : int = logging.get_logger(__name__) class __magic_name__ ( __lowerCAmelCase): A: str = ["pixel_values"] def __init__( self : str , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[int, float] = 1 / 255 , lowerCamelCase__ : bool = True , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = True , **lowerCamelCase__ : Any , ) -> None: '''simple docstring''' super().__init__(**lowerCamelCase__ ) UpperCamelCase__ : Optional[int] = size if size is not None else {'''shortest_edge''': 224} UpperCamelCase__ : List[str] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) UpperCamelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 256, '''width''': 256} UpperCamelCase__ : Dict = get_size_dict(lowerCamelCase__ , param_name='''crop_size''' ) UpperCamelCase__ : Optional[Any] = do_resize UpperCamelCase__ : List[Any] = size UpperCamelCase__ : Optional[int] = resample UpperCamelCase__ : Optional[int] = do_rescale UpperCamelCase__ : Dict = rescale_factor UpperCamelCase__ : Optional[Any] = do_center_crop UpperCamelCase__ : int = crop_size UpperCamelCase__ : List[str] = do_flip_channel_order def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : PILImageResampling = PIL.Image.BILINEAR , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[str] , ) -> np.ndarray: '''simple docstring''' UpperCamelCase__ : Optional[int] = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}" ) UpperCamelCase__ : int = get_resize_output_image_size(lowerCamelCase__ , size=size['''shortest_edge'''] , default_to_square=lowerCamelCase__ ) return resize(lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def UpperCAmelCase__ ( self : int , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : List[Any] , ) -> np.ndarray: '''simple docstring''' UpperCamelCase__ : Optional[int] = get_size_dict(lowerCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F"The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}" ) return center_crop(lowerCamelCase__ , size=(size['''height'''], size['''width''']) , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def UpperCAmelCase__ ( self : Tuple , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Union[int, float] , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **lowerCamelCase__ : Tuple , ) -> List[Any]: '''simple docstring''' return rescale(lowerCamelCase__ , scale=lowerCamelCase__ , data_format=lowerCamelCase__ , **lowerCamelCase__ ) def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase__ : np.ndarray , lowerCamelCase__ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: '''simple docstring''' return flip_channel_order(lowerCamelCase__ , data_format=lowerCamelCase__ ) def UpperCAmelCase__ ( self : Dict , lowerCamelCase__ : ImageInput , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : PILImageResampling = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : float = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Dict[str, int] = None , lowerCamelCase__ : bool = None , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , lowerCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **lowerCamelCase__ : List[Any] , ) -> PIL.Image.Image: '''simple docstring''' UpperCamelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize UpperCamelCase__ : List[Any] = resample if resample is not None else self.resample UpperCamelCase__ : str = do_rescale if do_rescale is not None else self.do_rescale UpperCamelCase__ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor UpperCamelCase__ : Any = do_center_crop if do_center_crop is not None else self.do_center_crop UpperCamelCase__ : List[str] = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) UpperCamelCase__ : List[str] = size if size is not None else self.size UpperCamelCase__ : int = get_size_dict(lowerCamelCase__ , default_to_square=lowerCamelCase__ ) UpperCamelCase__ : Tuple = crop_size if crop_size is not None else self.crop_size UpperCamelCase__ : int = get_size_dict(lowerCamelCase__ , param_name='''crop_size''' ) UpperCamelCase__ : int = make_list_of_images(lowerCamelCase__ ) if not valid_images(lowerCamelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) # All transformations expect numpy arrays. 
UpperCamelCase__ : Union[str, Any] = [to_numpy_array(lowerCamelCase__ ) for image in images] if do_resize: UpperCamelCase__ : Tuple = [self.resize(image=lowerCamelCase__ , size=lowerCamelCase__ , resample=lowerCamelCase__ ) for image in images] if do_center_crop: UpperCamelCase__ : Optional[Any] = [self.center_crop(image=lowerCamelCase__ , size=lowerCamelCase__ ) for image in images] if do_rescale: UpperCamelCase__ : List[Any] = [self.rescale(image=lowerCamelCase__ , scale=lowerCamelCase__ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: UpperCamelCase__ : List[Any] = [self.flip_channel_order(image=lowerCamelCase__ ) for image in images] UpperCamelCase__ : Union[str, Any] = [to_channel_dimension_format(lowerCamelCase__ , lowerCamelCase__ ) for image in images] UpperCamelCase__ : int = {'''pixel_values''': images} return BatchFeature(data=lowerCamelCase__ , tensor_type=lowerCamelCase__ ) def UpperCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Tuple] = None ) -> Tuple: '''simple docstring''' UpperCamelCase__ : Tuple = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowerCamelCase__ ) != len(lowerCamelCase__ ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(lowerCamelCase__ ): UpperCamelCase__ : Tuple = target_sizes.numpy() UpperCamelCase__ : Any = [] for idx in range(len(lowerCamelCase__ ) ): UpperCamelCase__ : Optional[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=lowerCamelCase__ ) UpperCamelCase__ : Optional[Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowerCamelCase__ ) else: UpperCamelCase__ : Dict = logits.argmax(dim=1 ) UpperCamelCase__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
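This processor (shortest-edge resize, 256-pixel center crop, BGR channel flip, semantic-segmentation post-processing) matches transformers' MobileViTImageProcessor; a hedged usage sketch, with an illustrative checkpoint and dummy image:

import numpy as np
from PIL import Image
from transformers import MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")
image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))  # dummy image
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # expected: torch.Size([1, 3, 256, 256])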
51
0
"""simple docstring""" import argparse from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta from transformers.utils import logging logging.set_verbosity_info() def A_ ( _lowercase, _lowercase, _lowercase ): '''simple docstring''' snake_case_ :str = TaConfig.from_json_file(_lowercase ) print(f"""Building PyTorch model from configuration: {config}""" ) snake_case_ :Optional[Any] = TaForConditionalGeneration(_lowercase ) # Load weights from tf checkpoint load_tf_weights_in_ta(_lowercase, _lowercase, _lowercase ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(_lowercase ) if __name__ == "__main__": __a = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help=( "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture." ), ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) __a = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
66
'''simple docstring''' import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() A_ : List[str] = logging.get_logger(__name__) A_ : Optional[Any] = { """b0""": efficientnet.EfficientNetBa, """b1""": efficientnet.EfficientNetBa, """b2""": efficientnet.EfficientNetBa, """b3""": efficientnet.EfficientNetBa, """b4""": efficientnet.EfficientNetBa, """b5""": efficientnet.EfficientNetBa, """b6""": efficientnet.EfficientNetBa, """b7""": efficientnet.EfficientNetBa, } A_ : Dict = { """b0""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.0, """image_size""": 2_2_4, """dropout_rate""": 0.2, """dw_padding""": [], }, """b1""": { """hidden_dim""": 1_2_8_0, """width_coef""": 1.0, """depth_coef""": 1.1, """image_size""": 2_4_0, """dropout_rate""": 0.2, """dw_padding""": [1_6], }, """b2""": { """hidden_dim""": 1_4_0_8, """width_coef""": 1.1, """depth_coef""": 1.2, """image_size""": 2_6_0, """dropout_rate""": 0.3, """dw_padding""": [5, 8, 1_6], }, """b3""": { """hidden_dim""": 1_5_3_6, """width_coef""": 1.2, """depth_coef""": 1.4, """image_size""": 3_0_0, """dropout_rate""": 0.3, """dw_padding""": [5, 1_8], }, """b4""": { """hidden_dim""": 1_7_9_2, """width_coef""": 1.4, """depth_coef""": 1.8, """image_size""": 3_8_0, """dropout_rate""": 0.4, """dw_padding""": [6], }, """b5""": { """hidden_dim""": 2_0_4_8, """width_coef""": 1.6, """depth_coef""": 2.2, """image_size""": 4_5_6, """dropout_rate""": 0.4, """dw_padding""": [1_3, 2_7], }, """b6""": { """hidden_dim""": 2_3_0_4, """width_coef""": 1.8, """depth_coef""": 2.6, """image_size""": 5_2_8, """dropout_rate""": 0.5, """dw_padding""": [3_1], }, """b7""": { """hidden_dim""": 2_5_6_0, """width_coef""": 2.0, """depth_coef""": 3.1, """image_size""": 6_0_0, """dropout_rate""": 0.5, """dw_padding""": [1_8], }, } def snake_case_ ( lowerCAmelCase_ )-> int: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = EfficientNetConfig() _UpperCAmelCase : List[Any] = CONFIG_MAP[model_name]["""hidden_dim"""] _UpperCAmelCase : str = CONFIG_MAP[model_name]["""width_coef"""] _UpperCAmelCase : int = CONFIG_MAP[model_name]["""depth_coef"""] _UpperCAmelCase : Optional[Any] = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : List[str] = CONFIG_MAP[model_name]["""dropout_rate"""] _UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""dw_padding"""] _UpperCAmelCase : List[str] = """huggingface/label-files""" _UpperCAmelCase : Optional[Any] = """imagenet-1k-id2label.json""" _UpperCAmelCase : List[str] = 1000 _UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type="""dataset""" ) , """r""" ) ) _UpperCAmelCase : Dict = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} _UpperCAmelCase : List[str] = idalabel _UpperCAmelCase : str = {v: k for k, v in idalabel.items()} return config def snake_case_ ( )-> Tuple: '''simple docstring''' _UpperCAmelCase : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _UpperCAmelCase : Tuple = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ) return im def snake_case_ ( lowerCAmelCase_ )-> Tuple: '''simple 
docstring''' _UpperCAmelCase : str = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : Tuple = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=lowerCAmelCase_ , ) return preprocessor def snake_case_ ( lowerCAmelCase_ )-> List[str]: '''simple docstring''' _UpperCAmelCase : int = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] _UpperCAmelCase : Optional[int] = sorted(set(lowerCAmelCase_ ) ) _UpperCAmelCase : str = len(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = {b: str(lowerCAmelCase_ ) for b, i in zip(lowerCAmelCase_ , range(lowerCAmelCase_ ) )} _UpperCAmelCase : List[str] = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: _UpperCAmelCase : Any = block_name_mapping[b] rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (F'''block{b}_project_bn/moving_variance:0''', 
F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) _UpperCAmelCase : Union[str, Any] = {} for item in rename_keys: if item[0] in original_param_names: _UpperCAmelCase : str = """efficientnet.""" + item[1] _UpperCAmelCase : Optional[Any] = """classifier.weight""" _UpperCAmelCase : Optional[Any] = """classifier.bias""" return key_mapping def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Any: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue _UpperCAmelCase : Union[str, Any] = key_mapping[key] if "_conv" in key and "kernel" in key: _UpperCAmelCase : Optional[int] = torch.from_numpy(lowerCAmelCase_ ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: _UpperCAmelCase : Any = torch.from_numpy(lowerCAmelCase_ ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: _UpperCAmelCase : int = torch.from_numpy(np.transpose(lowerCAmelCase_ ) ) else: _UpperCAmelCase : List[str] = torch.from_numpy(lowerCAmelCase_ ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(lowerCAmelCase_ ) @torch.no_grad() def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> Any: '''simple docstring''' _UpperCAmelCase : Union[str, Any] = model_classes[model_name]( include_top=lowerCAmelCase_ , weights="""imagenet""" , input_tensor=lowerCAmelCase_ , input_shape=lowerCAmelCase_ , pooling=lowerCAmelCase_ , classes=1000 , classifier_activation="""softmax""" , ) _UpperCAmelCase : List[str] = original_model.trainable_variables _UpperCAmelCase : Any = original_model.non_trainable_variables _UpperCAmelCase : Optional[int] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: _UpperCAmelCase : Dict = param.numpy() _UpperCAmelCase : Optional[Any] = list(tf_params.keys() ) # Load HuggingFace model _UpperCAmelCase : List[Any] = get_efficientnet_config(lowerCAmelCase_ ) _UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(lowerCAmelCase_ ).eval() _UpperCAmelCase : int = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) _UpperCAmelCase : Optional[int] = rename_keys(lowerCAmelCase_ ) replace_params(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # Initialize preprocessor and preprocess input image _UpperCAmelCase : str = convert_image_processor(lowerCAmelCase_ ) _UpperCAmelCase : List[str] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): _UpperCAmelCase : List[str] = hf_model(**lowerCAmelCase_ ) _UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference _UpperCAmelCase : int = False _UpperCAmelCase : Optional[int] = CONFIG_MAP[model_name]["""image_size"""] _UpperCAmelCase : Dict = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) _UpperCAmelCase : Optional[Any] = image.img_to_array(lowerCAmelCase_ ) _UpperCAmelCase : str = np.expand_dims(lowerCAmelCase_ , axis=0 ) _UpperCAmelCase : str = 
original_model.predict(lowerCAmelCase_ ) # Check whether original and HF model outputs match -> np.allclose assert np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1e-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(lowerCAmelCase_ ): os.mkdir(lowerCAmelCase_ ) # Save converted model and image processor hf_model.save_pretrained(lowerCAmelCase_ ) preprocessor.save_pretrained(lowerCAmelCase_ ) if push_to_hub: # Push model and image processor to hub print(F'''Pushing converted {model_name} to the hub...''' ) _UpperCAmelCase : List[Any] = F'''efficientnet-{model_name}''' preprocessor.push_to_hub(lowerCAmelCase_ ) hf_model.push_to_hub(lowerCAmelCase_ ) if __name__ == "__main__": A_ : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""b0""", type=str, help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""hf_model""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""") parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""") A_ : Optional[Any] = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
215
0
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class __magic_name__ : def __init__( self , snake_case , snake_case=1_3 , snake_case=2 , snake_case=2_4 , snake_case=1_6 , snake_case=True , snake_case=True , snake_case=3_2 , snake_case=5 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=1_0 , snake_case=0.02 , snake_case=None , snake_case=2 , snake_case=2 , ) -> Union[str, Any]: '''simple docstring''' _UpperCAmelCase : str =parent _UpperCAmelCase : Optional[int] =batch_size _UpperCAmelCase : str =patch_size _UpperCAmelCase : List[Any] =max_length _UpperCAmelCase : Optional[Any] =num_mel_bins _UpperCAmelCase : int =is_training _UpperCAmelCase : Dict =use_labels _UpperCAmelCase : List[str] =hidden_size _UpperCAmelCase : str =num_hidden_layers _UpperCAmelCase : Any =num_attention_heads _UpperCAmelCase : List[str] =intermediate_size _UpperCAmelCase : int =hidden_act _UpperCAmelCase : Optional[Any] =hidden_dropout_prob _UpperCAmelCase : Optional[Any] =attention_probs_dropout_prob _UpperCAmelCase : int =type_sequence_label_size _UpperCAmelCase : Optional[int] =initializer_range _UpperCAmelCase : int =scope _UpperCAmelCase : int =frequency_stride _UpperCAmelCase : Dict =time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) _UpperCAmelCase : Tuple =(self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 _UpperCAmelCase : Dict =(self.max_length - self.patch_size) // self.time_stride + 1 _UpperCAmelCase : Any =frequency_out_dimension * time_out_dimension _UpperCAmelCase : List[str] =num_patches + 2 def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' _UpperCAmelCase : List[Any] =floats_tensor([self.batch_size, self.max_length, self.num_mel_bins]) _UpperCAmelCase : List[Any] =None if self.use_labels: _UpperCAmelCase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size) _UpperCAmelCase : str =self.get_config() return config, input_values, labels def lowerCAmelCase ( self) -> Any: '''simple docstring''' return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def lowerCAmelCase ( self , snake_case , snake_case , snake_case) -> Optional[int]: '''simple 
docstring''' _UpperCAmelCase : Any =ASTModel(config=snake_case) model.to(snake_case) model.eval() _UpperCAmelCase : Any =model(snake_case) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : List[Any] =self.prepare_config_and_inputs() ( _UpperCAmelCase ) : Dict =config_and_inputs _UpperCAmelCase : Union[str, Any] ={'input_values': input_values} return config, inputs_dict @require_torch class __magic_name__ ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,unittest.TestCase ): UpperCAmelCase =( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) UpperCAmelCase =( {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel} if is_torch_available() else {} ) UpperCAmelCase =False UpperCAmelCase =False UpperCAmelCase =False UpperCAmelCase =False def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case) -> Tuple: '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def lowerCAmelCase ( self) -> Dict: '''simple docstring''' _UpperCAmelCase : List[Any] =ASTModelTester(self) _UpperCAmelCase : Any =ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=3_7) def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='AST does not use inputs_embeds') def lowerCAmelCase ( self) -> List[Any]: '''simple docstring''' pass def lowerCAmelCase ( self) -> List[str]: '''simple docstring''' _UpperCAmelCase : int =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : Optional[Any] =model_class(snake_case) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _UpperCAmelCase : int =model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case , nn.Linear)) def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : Optional[int] =model_class(snake_case) _UpperCAmelCase : Optional[Any] =inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase : List[Any] =[*signature.parameters.keys()] _UpperCAmelCase : str =['input_values'] self.assertListEqual(arg_names[:1] , snake_case) def lowerCAmelCase ( self) -> Tuple: '''simple docstring''' _UpperCAmelCase : List[Any] =self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case) @slow def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : Dict =ASTModel.from_pretrained(snake_case) self.assertIsNotNone(snake_case) def lowerCamelCase__ ( ): '''simple docstring''' _UpperCAmelCase : Tuple =hf_hub_download( repo_id='nielsr/audio-spectogram-transformer-checkpoint' , filename='sample_audio.flac' , repo_type='dataset' ) _UpperCAmelCase : List[str] =torchaudio.load(_UpperCAmelCase ) return audio, sampling_rate @require_torch @require_torchaudio class __magic_name__ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self) -> Optional[int]: '''simple docstring''' return ( 
ASTFeatureExtractor.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593') if is_torchaudio_available() else None ) @slow def lowerCAmelCase ( self) -> Optional[Any]: '''simple docstring''' _UpperCAmelCase : List[str] =self.default_feature_extractor _UpperCAmelCase : Tuple =ASTForAudioClassification.from_pretrained('MIT/ast-finetuned-audioset-10-10-0.4593').to(snake_case) _UpperCAmelCase : List[str] =self.default_feature_extractor _UpperCAmelCase : Optional[int] =prepare_audio() _UpperCAmelCase : List[str] =audio.squeeze().numpy() _UpperCAmelCase : List[Any] =feature_extractor(snake_case , sampling_rate=snake_case , return_tensors='pt').to(snake_case) # forward pass with torch.no_grad(): _UpperCAmelCase : List[Any] =model(**snake_case) # verify the logits _UpperCAmelCase : Union[str, Any] =torch.Size((1, 5_2_7)) self.assertEqual(outputs.logits.shape , snake_case) _UpperCAmelCase : Any =torch.tensor([-0.87_60, -7.00_42, -8.66_02]).to(snake_case) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4))
359
"""Pure-Python binary search and bisection helpers, mirroring the interface
of the standard-library `bisect` module."""
from __future__ import annotations

import bisect


def bisect_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    """Leftmost index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> int:
    """Rightmost index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(
    sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1
) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search delegating to the standard-library `bisect` module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(
    sorted_collection: list[int], item: int, left: int, right: int
) -> int | None:
    """Recursive binary search over `sorted_collection[left:right + 1]`."""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
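A small sanity check of the distinction between the left and right variants (my own addition, not part of the module):

values = [0, 5, 5, 7, 10]
assert bisect_left(values, 5) == 1    # insertion point before the run of 5s
assert bisect_right(values, 5) == 3   # insertion point after the run of 5s
assert binary_search(values, 7) == 3
assert binary_search_std_lib(values, 7) == 3
assert binary_search_by_recursion(values, 10, 0, len(values) - 1) == 4
assert binary_search(values, 6) is None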
242
0
'''simple docstring''' import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch a_ : Dict = logging.get_logger(__name__) class __UpperCamelCase : def __init__( self, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase=None, lowerCAmelCase=None ): """simple docstring""" if not conversation_id: lowerCamelCase_ =uuid.uuida() if past_user_inputs is None: lowerCamelCase_ =[] if generated_responses is None: lowerCamelCase_ =[] lowerCamelCase_ =conversation_id lowerCamelCase_ =past_user_inputs lowerCamelCase_ =generated_responses lowerCamelCase_ =text def __eq__( self, lowerCAmelCase ): """simple docstring""" if not isinstance(lowerCAmelCase, lowerCAmelCase ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = False ): """simple docstring""" if self.new_user_input: if overwrite: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ''' f'''with: "{text}".''' ) lowerCamelCase_ =text else: logger.warning( f'''User input added while unprocessed input was existing: "{self.new_user_input}" new input ''' f'''ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input''' ) else: lowerCamelCase_ =text def lowercase__ ( self ): """simple docstring""" if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowerCamelCase_ =None def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" self.generated_responses.append(lowerCAmelCase ) def lowercase__ ( self ): """simple docstring""" for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self ): """simple docstring""" lowerCamelCase_ =f'''Conversation id: {self.uuid} \n''' for is_user, text in self.iter_texts(): lowerCamelCase_ ='''user''' if is_user else '''bot''' output += f'''{name} >> {text} \n''' return output @add_end_docstrings( lowerCamelCase__ , r'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ' , ) class __UpperCamelCase ( lowerCamelCase__ ): def __init__( self, *lowerCAmelCase, **lowerCAmelCase ): """simple docstring""" super().__init__(*lowerCAmelCase, **lowerCAmelCase ) if self.tokenizer.pad_token_id is None: lowerCamelCase_ =self.tokenizer.eos_token def lowercase__ ( self, lowerCAmelCase=None, lowerCAmelCase=None, lowerCAmelCase=None, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ ={} lowerCamelCase_ ={} lowerCamelCase_ ={} if min_length_for_response is not None: lowerCamelCase_ =min_length_for_response if minimum_tokens is not None: lowerCamelCase_ =minimum_tokens if "max_length" in generate_kwargs: lowerCamelCase_ =generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowerCamelCase_ =clean_up_tokenization_spaces if 
generate_kwargs: forward_params.update(lowerCAmelCase ) return preprocess_params, forward_params, postprocess_params def __call__( self, lowerCAmelCase, lowerCAmelCase=0, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =super().__call__(lowerCAmelCase, num_workers=lowerCAmelCase, **lowerCAmelCase ) if isinstance(lowerCAmelCase, lowerCAmelCase ) and len(lowerCAmelCase ) == 1: return outputs[0] return outputs def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=32 ): """simple docstring""" if not isinstance(lowerCAmelCase, lowerCAmelCase ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( f'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. ''' '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer, '''_build_conversation_input_ids''' ): lowerCamelCase_ =self.tokenizer._build_conversation_input_ids(lowerCAmelCase ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowerCamelCase_ =self._legacy_parse_and_tokenize(lowerCAmelCase ) if self.framework == "pt": lowerCamelCase_ =torch.LongTensor([input_ids] ) elif self.framework == "tf": lowerCamelCase_ =tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=10, **lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =generate_kwargs.get('''max_length''', self.model.config.max_length ) lowerCamelCase_ =model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(f'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' ) lowerCamelCase_ =max_length - minimum_tokens lowerCamelCase_ =model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: lowerCamelCase_ =model_inputs['''attention_mask'''][:, -trim:] lowerCamelCase_ =model_inputs.pop('''conversation''' ) lowerCamelCase_ =max_length lowerCamelCase_ =self.model.generate(**lowerCAmelCase, **lowerCAmelCase ) if self.model.config.is_encoder_decoder: lowerCamelCase_ =1 else: lowerCamelCase_ =n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase=True ): """simple docstring""" lowerCamelCase_ =model_outputs['''output_ids'''] lowerCamelCase_ =self.tokenizer.decode( output_ids[0], skip_special_tokens=lowerCAmelCase, clean_up_tokenization_spaces=lowerCAmelCase, ) lowerCamelCase_ =model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(lowerCAmelCase ) return conversation def lowercase__ ( self, lowerCAmelCase ): """simple docstring""" lowerCamelCase_ =self.tokenizer.eos_token_id lowerCamelCase_ =[] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(lowerCAmelCase, add_special_tokens=lowerCAmelCase ) ) if len(lowerCAmelCase ) > self.tokenizer.model_max_length: lowerCamelCase_ =input_ids[-self.tokenizer.model_max_length :] return input_ids
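This matches the ConversationalPipeline that transformers exposed under the "conversational" task at the time, so a minimal driving loop would look roughly like the sketch below (the model name is illustrative; this pipeline and the Conversation class were deprecated in later transformers releases):

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")

conversation = Conversation("Hi there, how are you?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

# Follow-up turn on the same Conversation object.
conversation.add_user_input("What do you like to do for fun?")
conversation = chatbot(conversation)
print(conversation)  # __repr__ prints the whole exchange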
75
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process lowerCAmelCase = logging.getLogger(__name__) lowerCAmelCase = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) lowerCAmelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _a : _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={ '''help''': ( '''The model checkpoint for weights initialization. Leave None if you want to train a model from''' ''' scratch.''' ) } , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(UpperCamelCase__ )} , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , ) @dataclass class _a : _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''The input training data file (a text file).'''} ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={ '''help''': ( '''The input training data files (multiple files in glob format). 
''' '''Very often splitting large files to smaller files can prevent tokenizer going out of memory''' ) } , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''An optional input train ref data file for whole word mask in Chinese.'''} , ) _lowercase : Optional[str] = field( default=UpperCamelCase__ , metadata={'''help''': '''An optional input eval ref data file for whole word mask in Chinese.'''} , ) _lowercase : bool = field( default=UpperCamelCase__ , metadata={'''help''': '''Whether distinct lines of text in the dataset are to be handled as distinct sequences.'''} , ) _lowercase : bool = field( default=UpperCamelCase__ , metadata={'''help''': '''Train with masked-language modeling loss instead of language modeling.'''} ) _lowercase : bool = field(default=UpperCamelCase__ , metadata={'''help''': '''Whether ot not to use whole word mask.'''} ) _lowercase : float = field( default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} ) _lowercase : float = field( default=1 / 6 , metadata={ '''help''': ( '''Ratio of length of a span of masked tokens to surrounding context length for permutation language''' ''' modeling.''' ) } , ) _lowercase : int = field( default=5 , metadata={'''help''': '''Maximum length of a span of masked tokens for permutation language modeling.'''} ) _lowercase : int = field( default=-1 , metadata={ '''help''': ( '''Optional input sequence length after tokenization.''' '''The training dataset will be truncated in block of this size for training.''' '''Default to the model max input length for single sentence inputs (take into account special tokens).''' ) } , ) _lowercase : bool = field( default=UpperCamelCase__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} ) def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False , SCREAMING_SNAKE_CASE = None , ): """simple docstring""" def _dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError('''You need to set world whole masking and mlm to True for Chinese Whole Word Mask''' ) return LineByLineWithRefDataset( tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , ref_path=SCREAMING_SNAKE_CASE , ) return LineByLineTextDataset(tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size ) else: return TextDataset( tokenizer=SCREAMING_SNAKE_CASE , file_path=SCREAMING_SNAKE_CASE , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=SCREAMING_SNAKE_CASE , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(SCREAMING_SNAKE_CASE ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def _a ( ): """simple docstring""" lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( '''Cannot do evaluation without an evaluation data file. 
Either supply a file to --eval_data_file ''' '''or remove the --do_eval argument.''' ) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ''' --overwrite_output_dir to overcome.''' ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: lowercase__ = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: lowercase__ = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: lowercase__ = CONFIG_MAPPING[model_args.model_type]() logger.warning('''You are instantiating a new config instance from scratch.''' ) if model_args.tokenizer_name: lowercase__ = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: lowercase__ = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( '''You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another''' ''' script, save it,and load it from here, using --tokenizer_name''' ) if model_args.model_name_or_path: lowercase__ = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) else: logger.info('''Training new model from scratch''' ) lowercase__ = AutoModelWithLMHead.from_config(SCREAMING_SNAKE_CASE ) model.resize_token_embeddings(len(SCREAMING_SNAKE_CASE ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( '''BERT and RoBERTa-like models do not have LM heads but masked LM heads. 
They must be run using the''' '''--mlm flag (masked language modeling).''' ) if data_args.block_size <= 0: lowercase__ = tokenizer.max_len # Our input block size will be the max possible for the model else: lowercase__ = min(data_args.block_size , tokenizer.max_len ) # Get datasets lowercase__ = ( get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) lowercase__ = ( get_dataset(SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , evaluate=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": lowercase__ = DataCollatorForPermutationLanguageModeling( tokenizer=SCREAMING_SNAKE_CASE , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: lowercase__ = DataCollatorForWholeWordMask( tokenizer=SCREAMING_SNAKE_CASE , mlm_probability=data_args.mlm_probability ) else: lowercase__ = DataCollatorForLanguageModeling( tokenizer=SCREAMING_SNAKE_CASE , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer lowercase__ = Trainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_collator=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , prediction_loss_only=SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: lowercase__ = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=SCREAMING_SNAKE_CASE ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation lowercase__ = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) lowercase__ = trainer.evaluate() lowercase__ = math.exp(eval_output['''eval_loss'''] ) lowercase__ = {'''perplexity''': perplexity} lowercase__ = os.path.join(training_args.output_dir , '''eval_results_lm.txt''' ) if trainer.is_world_master(): with open(SCREAMING_SNAKE_CASE , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , SCREAMING_SNAKE_CASE , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) results.update(SCREAMING_SNAKE_CASE ) return results def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" main() if __name__ == "__main__": main()
110
0
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # keeps the calculated value
    # Since C(n, k) = C(n, n - k), work with the smaller k
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    # Number of binary search trees on node_count nodes: C(2n, n) // (n + 1)
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    # Labeled binary trees: Catalan(n) shapes times n! labelings
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
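As a quick sanity check of the counting functions above: C(6, 3) = 20, so for 3 nodes there are 20 // 4 = 5 binary search trees, and 5 * 3! = 30 labeled binary trees. A minimal check, assuming the definitions above:

assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5       # binary search trees on 3 nodes
assert binary_tree_count(3) == 30   # labeled binary trees on 3 nodes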
354
import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> int: """simple docstring""" try: snake_case_ : Optional[Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. snake_case_ : Tuple = default else: # KEY is set, convert it to True or False. try: snake_case_ : Union[str, Any] = strtobool(_UpperCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''' ) return _value lowerCAmelCase_ = parse_flag_from_env('''RUN_SLOW''', default=False) def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" return unittest.skip('''Test was skipped''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Dict: """simple docstring""" return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[Any]: """simple docstring""" return unittest.skipUnless( is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> List[Any]: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> 
str: """simple docstring""" return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Any: """simple docstring""" return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase=None , _UpperCamelCase=None ) -> Tuple: """simple docstring""" if test_case is None: return partial(_UpperCamelCase , version=_UpperCamelCase ) return unittest.skipUnless(is_torch_version('''>=''' , _UpperCamelCase ) , f'''test requires torch version >= {version}''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Optional[int]: """simple docstring""" return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(_UpperCamelCase ) lowerCAmelCase_ = ( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowerCamelCase_ ( _UpperCamelCase ) -> Union[str, Any]: """simple docstring""" return unittest.skipUnless( _atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(_UpperCamelCase ) class __lowerCAmelCase ( unittest.TestCase ): lowerCamelCase_ : str = True @classmethod def lowerCamelCase (cls ) -> List[Any]: '''simple docstring''' snake_case_ : Any = tempfile.mkdtemp() @classmethod def lowerCamelCase (cls ) -> List[Any]: '''simple docstring''' if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def lowerCamelCase (self ) -> Optional[int]: '''simple docstring''' if self.clear_on_setup: for path in Path(self.tmpdir ).glob('''**/*''' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(__magic_name__ ) class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase (self ) -> int: '''simple docstring''' super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class __lowerCAmelCase ( unittest.TestCase ): def lowerCamelCase (self , __magic_name__ ) -> Dict: '''simple docstring''' snake_case_ : str = mocks if isinstance(__magic_name__ , (tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowerCamelCase_ ( _UpperCamelCase ) -> List[str]: """simple docstring""" snake_case_ : Optional[Any] = AcceleratorState() snake_case_ : List[str] = tensor[None].clone().to(state.device ) snake_case_ : Optional[Any] = gather(_UpperCamelCase ).cpu() snake_case_ : Optional[Any] = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , _UpperCamelCase ): return False return True class __lowerCAmelCase : def __init__(self , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]: '''simple docstring''' snake_case_ : Any = returncode snake_case_ : List[Any] = stdout snake_case_ : Tuple = stderr async def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> str: """simple docstring""" while True: snake_case_ : Tuple = await stream.readline() if line: callback(_UpperCamelCase ) else: break async def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False ) -> _RunOutput: """simple docstring""" if echo: print('''\nRunning: ''' , ''' '''.join(_UpperCamelCase ) ) snake_case_ : List[str] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) snake_case_ : List[Any] = [] snake_case_ : List[Any] = [] def tee(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ): snake_case_ : Union[str, Any] = line.decode('''utf-8''' ).rstrip() sink.append(_UpperCamelCase ) if not quiet: print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label='''stdout:''' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label='''stderr:''' ) ) ), ] , timeout=_UpperCamelCase , ) return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=180 , _UpperCamelCase=False , _UpperCamelCase=True ) -> _RunOutput: """simple docstring""" snake_case_ : List[str] = asyncio.get_event_loop() snake_case_ : List[Any] = loop.run_until_complete( _stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) ) snake_case_ : Optional[int] = ''' '''.join(_UpperCamelCase ) if result.returncode > 0: snake_case_ : Union[str, Any] = '''\n'''.join(result.stderr ) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''' ) return result class __lowerCAmelCase ( _a ): pass def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> List[Any]: """simple docstring""" try: snake_case_ : List[str] = subprocess.check_output(_UpperCamelCase , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(_UpperCamelCase , '''decode''' ): snake_case_ : Tuple = output.decode('''utf-8''' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( f'''Command `{" ".join(_UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
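These helpers come in two groups: skip decorators that gate tests on available devices and libraries, and subprocess wrappers for launching scripts under test. A minimal sketch of the final synchronous wrapper, assuming it is exported under its usual name run_command (the body references return_stdout and output, and raises SubprocessCallException with the captured output on failure; the command here is hypothetical):

output = run_command(["python", "-c", "print('ok')"], return_stdout=True)
assert output.strip() == "ok"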
279
0
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
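The loop builds the numerators of the convergents of the continued fraction for e, [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: the i-th partial denominator is 2*i//3 when i is divisible by 3 and 1 otherwise. As a check against the Project Euler 65 statement, the 10th convergent is 1457/536, so:

assert solution(10) == 17   # sum_digits(1457) == 1 + 4 + 5 + 7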
162
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
162
1
from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """huggingface/informer-tourism-monthly""": ( """https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json""" ), # See all Informer models at https://huggingface.co/models?filter=informer } class _lowerCAmelCase ( UpperCAmelCase_ ): '''simple docstring''' a_ : List[str] ="""informer""" a_ : Dict ={ """hidden_size""": """d_model""", """num_attention_heads""": """encoder_attention_heads""", """num_hidden_layers""": """encoder_layers""", } def __init__( self : Union[str, Any] , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[int] = None , UpperCamelCase : str = "student_t" , UpperCamelCase : str = "nll" , UpperCamelCase : int = 1 , UpperCamelCase : List[int] = None , UpperCamelCase : Optional[Union[str, bool]] = "mean" , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : int = 64 , UpperCamelCase : int = 32 , UpperCamelCase : int = 32 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : int = 2 , UpperCamelCase : bool = True , UpperCamelCase : str = "gelu" , UpperCamelCase : float = 0.05 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : float = 0.1 , UpperCamelCase : int = 1_00 , UpperCamelCase : float = 0.02 , UpperCamelCase : Tuple=True , UpperCamelCase : str = "prob" , UpperCamelCase : int = 5 , UpperCamelCase : bool = True , **UpperCamelCase : List[str] , ): '''simple docstring''' _snake_case : List[Any] = prediction_length _snake_case : List[Any] = context_length or prediction_length _snake_case : Dict = distribution_output _snake_case : int = loss _snake_case : Dict = input_size _snake_case : int = num_time_features _snake_case : int = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7] _snake_case : Optional[Any] = scaling _snake_case : Any = num_dynamic_real_features _snake_case : str = num_static_real_features _snake_case : List[str] = num_static_categorical_features # set cardinality if cardinality and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) _snake_case : Tuple = cardinality else: _snake_case : Optional[Any] = [0] # set embedding_dimension if embedding_dimension and num_static_categorical_features > 0: if len(UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) _snake_case : int = embedding_dimension else: _snake_case : Dict = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] _snake_case : int = num_parallel_samples # Transformer architecture configuration _snake_case : Dict = input_size * len(self.lags_sequence ) + self._number_of_features _snake_case : Union[str, Any] = d_model _snake_case : int = encoder_attention_heads _snake_case : Optional[Any] = decoder_attention_heads _snake_case : int = encoder_ffn_dim _snake_case : Dict = decoder_ffn_dim _snake_case : str = encoder_layers _snake_case : List[str] = decoder_layers _snake_case : Optional[Any] = dropout _snake_case : int = 
attention_dropout _snake_case : int = activation_dropout _snake_case : str = encoder_layerdrop _snake_case : List[str] = decoder_layerdrop _snake_case : Dict = activation_function _snake_case : Tuple = init_std _snake_case : Dict = use_cache # Informer _snake_case : List[Any] = attention_type _snake_case : Any = sampling_factor _snake_case : Dict = distil super().__init__(is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) @property def UpperCamelCase_ ( self : List[str] ): '''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
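A minimal sketch of instantiating this configuration, assuming it is exported as InformerConfig (consistent with the "informer" model_type and the checkpoint names above; the settings are illustrative):

from transformers import InformerConfig

config = InformerConfig(prediction_length=24, context_length=48, attention_type="prob", sampling_factor=5)
print(config.d_model, config.attention_type)  # d_model defaults to 64 per the signature above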
260
def is_even(number: int) -> bool:
    # A number is even iff its lowest bit is 0
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
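The check works because the least-significant bit of an integer is 0 exactly when the number is even; in Python this also holds for negative numbers, which `&` treats in two's complement. For example:

assert is_even(4) is True
assert is_even(7) is False
assert is_even(-2) is True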
260
1
"""simple docstring""" import math def _snake_case ( _snake_case : List[Any] , _snake_case : Any ): if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. return y * math.logaa(_snake_case ) else: if x == 0: # 0 raised to any number is 0 return 0 elif y == 0: return 1 # any number raised to 0 is 1 raise AssertionError('''This should never happen''' ) if __name__ == "__main__": # Main function # Read two numbers from input and typecast them to int using map function. # Here x is the base and y is the power. snake_case__ : List[Any] = '''Enter the base and the power separated by a comma: ''' snake_case__ , snake_case__ : Optional[int] = map(int, input(prompt).split(''',''')) snake_case__ , snake_case__ : str = map(int, input(prompt).split(''',''')) # We find the log of each number, using the function res(), which takes two # arguments. snake_case__ : str = res(xa, ya) snake_case__ : Tuple = res(xa, ya) # We check for the largest number if resa > resa: print('''Largest number is''', xa, '''^''', ya) elif resa > resa: print('''Largest number is''', xa, '''^''', ya) else: print('''Both are equal''')
60
import importlib import os import sys # This is required to make the module import works (when the python process is running from the root of the repo) sys.path.append('.') def __lowercase ( lowerCamelCase : Any ): UpperCamelCase_ : Union[str, Any] = test_file.split(os.path.sep ) if components[0:2] != ["tests", "models"]: raise ValueError( '`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got ' F"{test_file} instead." ) UpperCamelCase_ : str = components[-1] if not test_fn.endswith('py' ): raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." ) if not test_fn.startswith('test_modeling_' ): raise ValueError( F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." ) UpperCamelCase_ : Union[str, Any] = components[:-1] + [test_fn.replace('.py' , '' )] UpperCamelCase_ : List[Any] = '.'.join(lowerCamelCase ) return test_module_path def __lowercase ( lowerCamelCase : Optional[Any] ): UpperCamelCase_ : List[Any] = get_module_path(lowerCamelCase ) UpperCamelCase_ : Union[str, Any] = importlib.import_module(lowerCamelCase ) return test_module def __lowercase ( lowerCamelCase : List[str] ): UpperCamelCase_ : int = [] UpperCamelCase_ : Tuple = get_test_module(lowerCamelCase ) for attr in dir(lowerCamelCase ): if attr.endswith('ModelTester' ): tester_classes.append(getattr(lowerCamelCase , lowerCamelCase ) ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : str ): UpperCamelCase_ : List[str] = [] UpperCamelCase_ : Union[str, Any] = get_test_module(lowerCamelCase ) for attr in dir(lowerCamelCase ): UpperCamelCase_ : Dict = getattr(lowerCamelCase , lowerCamelCase ) # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking # `all_model_classes` is not empty (which also excludes other special classes). UpperCamelCase_ : Optional[int] = getattr(lowerCamelCase , 'all_model_classes' , [] ) if len(lowerCamelCase ) > 0: test_classes.append(lowerCamelCase ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : Dict ): UpperCamelCase_ : int = get_test_classes(lowerCamelCase ) UpperCamelCase_ : List[Any] = set() for test_class in test_classes: model_classes.update(test_class.all_model_classes ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : Tuple ): UpperCamelCase_ : int = test_class() if hasattr(lowerCamelCase , 'setUp' ): test.setUp() UpperCamelCase_ : List[Any] = None if hasattr(lowerCamelCase , 'model_tester' ): # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case. 
if test.model_tester is not None: UpperCamelCase_ : Optional[Any] = test.model_tester.__class__ return model_tester def __lowercase ( lowerCamelCase : Tuple , lowerCamelCase : Dict ): UpperCamelCase_ : Optional[Any] = get_test_classes(lowerCamelCase ) UpperCamelCase_ : Tuple = [] for test_class in test_classes: if model_class in test_class.all_model_classes: target_test_classes.append(lowerCamelCase ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : Any , lowerCamelCase : Tuple ): UpperCamelCase_ : List[Any] = get_test_classes_for_model(lowerCamelCase , lowerCamelCase ) UpperCamelCase_ : int = [] for test_class in test_classes: UpperCamelCase_ : Tuple = get_model_tester_from_test_class(lowerCamelCase ) if tester_class is not None: tester_classes.append(lowerCamelCase ) # sort with class names return sorted(lowerCamelCase , key=lambda lowerCamelCase : x.__name__ ) def __lowercase ( lowerCamelCase : str ): UpperCamelCase_ : Tuple = get_test_classes(lowerCamelCase ) UpperCamelCase_ : Tuple = {test_class: get_model_tester_from_test_class(lowerCamelCase ) for test_class in test_classes} return test_tester_mapping def __lowercase ( lowerCamelCase : Any ): UpperCamelCase_ : List[str] = get_model_classes(lowerCamelCase ) UpperCamelCase_ : int = { model_class: get_test_classes_for_model(lowerCamelCase , lowerCamelCase ) for model_class in model_classes } return model_test_mapping def __lowercase ( lowerCamelCase : Tuple ): UpperCamelCase_ : Tuple = get_model_classes(lowerCamelCase ) UpperCamelCase_ : Optional[Any] = { model_class: get_tester_classes_for_model(lowerCamelCase , lowerCamelCase ) for model_class in model_classes } return model_to_tester_mapping def __lowercase ( lowerCamelCase : Any ): if isinstance(lowerCamelCase , lowerCamelCase ): return o elif isinstance(lowerCamelCase , lowerCamelCase ): return o.__name__ elif isinstance(lowerCamelCase , (list, tuple) ): return [to_json(lowerCamelCase ) for x in o] elif isinstance(lowerCamelCase , lowerCamelCase ): return {to_json(lowerCamelCase ): to_json(lowerCamelCase ) for k, v in o.items()} else: return o
175
0
"""simple docstring""" def __lowerCamelCase ( __UpperCamelCase = 50000000 ) -> int: """simple docstring""" lowerCAmelCase_ : Any = set() lowerCAmelCase_ : Tuple = int((limit - 24) ** (1 / 2) ) lowerCAmelCase_ : Optional[Any] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , __UpperCamelCase ) ) ) for primea in primes: lowerCAmelCase_ : Dict = primea * primea for primea in primes: lowerCAmelCase_ : Any = primea * primea * primea if square + cube >= limit - 16: break for primea in primes: lowerCAmelCase_ : Any = primea * primea * primea * primea lowerCAmelCase_ : Tuple = square + cube + tetr if total >= limit: break ret.add(__UpperCamelCase ) return len(__UpperCamelCase ) if __name__ == "__main__": print(F"""{solution() = }""")
365
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = { """EleutherAI/gpt-neo-1.3B""": """https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json""", # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo } class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : Union[str, Any] = """gpt_neo""" a_ : List[Any] = ["""past_key_values"""] a_ : Optional[Any] = {"""num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers"""} def __init__( self : Optional[int] , a_ : List[str]=5_02_57 , a_ : List[str]=20_48 , a_ : Union[str, Any]=20_48 , a_ : Union[str, Any]=24 , a_ : Optional[int]=[[["global", "local"], 12]] , a_ : str=16 , a_ : Optional[Any]=None , a_ : str=2_56 , a_ : Union[str, Any]="gelu_new" , a_ : Optional[int]=0.0 , a_ : Optional[Any]=0.0 , a_ : List[Any]=0.0 , a_ : List[Any]=0.1 , a_ : Optional[Any]=1e-5 , a_ : Optional[Any]=0.02 , a_ : int=True , a_ : Optional[Any]=5_02_56 , a_ : Tuple=5_02_56 , **a_ : str , ): lowerCAmelCase_ : Optional[Any] = vocab_size lowerCAmelCase_ : str = max_position_embeddings lowerCAmelCase_ : Tuple = hidden_size lowerCAmelCase_ : Union[str, Any] = num_layers lowerCAmelCase_ : str = num_heads lowerCAmelCase_ : List[str] = intermediate_size lowerCAmelCase_ : Union[str, Any] = window_size lowerCAmelCase_ : Any = activation_function lowerCAmelCase_ : str = resid_dropout lowerCAmelCase_ : Union[str, Any] = embed_dropout lowerCAmelCase_ : Optional[Any] = attention_dropout lowerCAmelCase_ : Dict = classifier_dropout lowerCAmelCase_ : int = layer_norm_epsilon lowerCAmelCase_ : Dict = initializer_range lowerCAmelCase_ : List[Any] = use_cache lowerCAmelCase_ : Optional[int] = bos_token_id lowerCAmelCase_ : str = eos_token_id lowerCAmelCase_ : Optional[Any] = attention_types lowerCAmelCase_ : Optional[Any] = self.expand_attention_types_params(a_ ) if len(self.attention_layers ) != self.num_layers: raise ValueError( "Configuration for convolutional module is incorrect. " "It is required that `len(config.attention_layers)` == `config.num_layers` " f'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, ''' f'''`config.num_layers = {self.num_layers}`. ''' "`config.attention_layers` is prepared using `config.attention_types`. " "Please verify the value of `config.attention_types` argument." 
) super().__init__(bos_token_id=a_ , eos_token_id=a_ , **a_ ) @staticmethod def lowerCamelCase ( a_ : Optional[Any] ): lowerCAmelCase_ : int = [] for item in attention_types: for _ in range(item[1] ): attentions.extend(item[0] ) return attentions def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) -> int: """simple docstring""" import torch lowerCAmelCase_ : str = input.size() lowerCAmelCase_ : List[Any] = len(__UpperCamelCase ) lowerCAmelCase_ : Tuple = shape[dimension] lowerCAmelCase_ : Tuple = torch.arange(0 , __UpperCamelCase , __UpperCamelCase ) lowerCAmelCase_ : List[Any] = torch.div(sizedim - size , __UpperCamelCase , rounding_mode="floor" ) + 1 lowerCAmelCase_ : Dict = torch.arange(__UpperCamelCase ) + low_indices[:min_length][:, None] lowerCAmelCase_ : Tuple = [slice(__UpperCamelCase )] * rank lowerCAmelCase_ : List[str] = indices lowerCAmelCase_ : Dict = input[s] lowerCAmelCase_ : Tuple = list(range(0 , rank + 1 ) ) perm.append(perm.pop(dimension + 1 ) ) return sliced.permute(__UpperCamelCase ) def __lowerCamelCase ( __UpperCamelCase , __UpperCamelCase ) -> Any: """simple docstring""" import torch lowerCAmelCase_ : Optional[int] = torch.arange(1 , __UpperCamelCase ) lowerCAmelCase_ : Tuple = torch.remainder(__UpperCamelCase , __UpperCamelCase ) lowerCAmelCase_ : Tuple = remainders == 0 lowerCAmelCase_ : List[Any] = candidates[divisor_indices] lowerCAmelCase_ : List[str] = torch.max(__UpperCamelCase ) return largest_divisor, torch.div(__UpperCamelCase , __UpperCamelCase , rounding_mode="floor" ) class __lowerCamelCase ( A__ ): '''simple docstring''' @property def lowerCamelCase ( self : List[str] ): lowerCAmelCase_ : Any = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} ) if self.use_past: self.fill_with_past_key_values_(a_ , direction="inputs" ) lowerCAmelCase_ : int = {0: "batch", 1: "past_sequence + sequence"} else: lowerCAmelCase_ : str = {0: "batch", 1: "sequence"} return common_inputs @property def lowerCamelCase ( self : int ): return self._config.num_heads def lowerCamelCase ( self : Optional[Any] , a_ : PreTrainedTokenizer , a_ : int = -1 , a_ : int = -1 , a_ : bool = False , a_ : Optional[TensorType] = None , ): lowerCAmelCase_ : int = super(a_ , self ).generate_dummy_inputs( a_ , batch_size=a_ , seq_length=a_ , is_pair=a_ , framework=a_ ) # We need to order the input in the way they appears in the forward() lowerCAmelCase_ : str = OrderedDict({"input_ids": common_inputs["input_ids"]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." ) else: import torch lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values lowerCAmelCase_ : str = seqlen + 2 lowerCAmelCase_ : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) lowerCAmelCase_ : Optional[int] = [ (torch.zeros(a_ ), torch.zeros(a_ )) for _ in range(self.num_layers ) ] lowerCAmelCase_ : Tuple = common_inputs["attention_mask"] if self.use_past: lowerCAmelCase_ : List[str] = ordered_inputs["attention_mask"].dtype lowerCAmelCase_ : Optional[Any] = torch.cat( [ordered_inputs["attention_mask"], torch.ones(a_ , a_ , dtype=a_ )] , dim=1 ) return ordered_inputs @property def lowerCamelCase ( self : Union[str, Any] ): return 13
161
0
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        any_type: Any | T = None
        self.N: int = len(arr)
        # Implicit array layout: st[1] is the root, the leaves st[N .. 2N-1]
        # hold the input array.
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """Set arr[p] = v and recompute the ancestors of that leaf."""
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """Fold fn over the inclusive index range [l, r]."""
        l, r = l + self.N, r + self.N
        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """Check every segment against a plain reduce() over the raw list."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
        test_all_segments()
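This is the iterative (bottom-up) segment tree: build() is O(n), and both update() and query() are O(log n) for associative, commutative functions such as min, max, and +. A minimal standalone sketch, assuming the SegmentTree class above:

st = SegmentTree([5, 2, 8, 1], min)
assert st.query(0, 3) == 1
st.update(3, 9)                  # the array becomes [5, 2, 8, 9]
assert st.query(0, 3) == 2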
24
"""simple docstring""" from __future__ import annotations from typing import Any class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase = 0 ): __a , __a = row, column __a = [[default_value for c in range(lowerCamelCase )] for r in range(lowerCamelCase )] def __str__( self ): __a = F"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier __a = 0 for row_vector in self.array: for obj in row_vector: __a = max(lowerCamelCase , len(str(lowerCamelCase ) ) ) __a = F"%{max_element_length}s" # Make string and return def single_line(lowerCamelCase ) -> str: nonlocal string_format_identifier __a = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(lowerCamelCase ) for row_vector in self.array ) return s def __repr__( self ): return str(self ) def a__ ( self , lowerCamelCase ): if not (isinstance(lowerCamelCase , (list, tuple) ) and len(lowerCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self , lowerCamelCase , lowerCamelCase ): assert self.validate_indicies(lowerCamelCase ) __a = value def __add__( self , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == another.row and self.column == another.column # Add __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] + another[r, c] return result def __neg__( self ): __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = -self[r, c] return result def __sub__( self , lowerCamelCase ): return self + (-another) def __mul__( self , lowerCamelCase ): if isinstance(lowerCamelCase , (int, float) ): # Scalar multiplication __a = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] * another return result elif isinstance(lowerCamelCase , lowerCamelCase ): # Matrix multiplication assert self.column == another.row __a = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: __a = F"Unsupported type given for another ({type(lowerCamelCase )})" raise TypeError(lowerCamelCase ) def a__ ( self ): __a = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): __a = self[r, c] return result def a__ ( self , lowerCamelCase , lowerCamelCase ): assert isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(lowerCamelCase , lowerCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate __a = v.transpose() __a = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _lowerCamelCase( ): # a^(-1) __a = Matrix(3 , 3 , 0 ) for i in range(3 ): __a = 1 print(F"a^(-1) is {ainv}" ) # u, v __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 1, 2, -3 __a = Matrix(3 , 1 , 0 ) __a , __a , __a = 4, -2, 5 print(F"u is {u}" ) print(F"v is {v}" ) print(F"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(F"(a + uv^T)^(-1) is 
{ainv.sherman_morrison(a , a )}" ) def _lowerCamelCase( ): import doctest doctest.testmod() testa()
261
0
import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class snake_case_ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Dict )->List[Any]: '''simple docstring''' __lowerCAmelCase : Any = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __lowerCAmelCase : str = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_snake_case ) __lowerCAmelCase : str = -1 __lowerCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case ) __lowerCAmelCase : str = model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case ) __lowerCAmelCase : int = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: __lowerCAmelCase : Union[str, Any] = TextStreamer(_snake_case ) model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case , streamer=_snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __lowerCAmelCase : Dict = cs.out[:-1] self.assertEqual(_snake_case , _snake_case ) def UpperCAmelCase__ ( self : List[Any] )->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __lowerCAmelCase : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_snake_case ) __lowerCAmelCase : Tuple = -1 __lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case ) __lowerCAmelCase : Union[str, Any] = model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case ) __lowerCAmelCase : Tuple = tokenizer.decode(greedy_ids[0] ) __lowerCAmelCase : Optional[int] = TextIteratorStreamer(_snake_case ) __lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} __lowerCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=_snake_case ) thread.start() __lowerCAmelCase : str = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_snake_case , _snake_case ) def UpperCAmelCase__ ( self : Tuple )->Optional[Any]: '''simple docstring''' __lowerCAmelCase : List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __lowerCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_snake_case ) __lowerCAmelCase : List[Any] = -1 __lowerCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case ) __lowerCAmelCase : List[str] = model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case ) __lowerCAmelCase : List[str] = greedy_ids[:, input_ids.shape[1] :] __lowerCAmelCase : Optional[int] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: __lowerCAmelCase : str = TextStreamer(_snake_case , skip_prompt=_snake_case ) model.generate(_snake_case , max_new_tokens=10 , do_sample=_snake_case , streamer=_snake_case ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer __lowerCAmelCase : Any = cs.out[:-1] self.assertEqual(_snake_case , _snake_case ) def UpperCAmelCase__ ( self : str )->str: '''simple 
docstring''' __lowerCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""distilgpt2""" ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained("""distilgpt2""" ).to(_snake_case ) __lowerCAmelCase : str = -1 __lowerCAmelCase : str = torch.ones((1, 5) , device=_snake_case ).long() * model.config.bos_token_id with CaptureStdout() as cs: __lowerCAmelCase : Union[str, Any] = TextStreamer(_snake_case , skip_special_tokens=_snake_case ) model.generate(_snake_case , max_new_tokens=1 , do_sample=_snake_case , streamer=_snake_case ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token __lowerCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n" __lowerCAmelCase : Optional[Any] = tokenizer(_snake_case , return_tensors="""pt""" ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def UpperCAmelCase__ ( self : Tuple )->Any: '''simple docstring''' __lowerCAmelCase : int = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ) __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" ).to(_snake_case ) __lowerCAmelCase : str = -1 __lowerCAmelCase : Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_snake_case ) __lowerCAmelCase : str = TextIteratorStreamer(_snake_case , timeout=0.001 ) __lowerCAmelCase : Any = {"""input_ids""": input_ids, """max_new_tokens""": 10, """do_sample""": False, """streamer""": streamer} __lowerCAmelCase : Optional[int] = Thread(target=model.generate , kwargs=_snake_case ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_snake_case ): __lowerCAmelCase : Optional[int] = """""" for new_text in streamer: streamer_text += new_text
357
from __future__ import annotations import inspect import unittest from math import floor import numpy as np from transformers import CvtConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFCvtForImageClassification, TFCvtModel from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_ ( __lowercase ): def UpperCAmelCase__ ( self : Dict )->List[Any]: '''simple docstring''' __lowerCAmelCase : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(_snake_case , """embed_dim""" ) ) self.parent.assertTrue(hasattr(_snake_case , """num_heads""" ) ) class snake_case_ : def __init__( self : Dict , _snake_case : int , _snake_case : str=13 , _snake_case : Optional[int]=64 , _snake_case : Union[str, Any]=3 , _snake_case : Any=[16, 48, 96] , _snake_case : List[str]=[1, 3, 6] , _snake_case : str=[1, 2, 10] , _snake_case : Tuple=[7, 3, 3] , _snake_case : Tuple=[4, 2, 2] , _snake_case : Tuple=[2, 1, 1] , _snake_case : List[str]=[2, 2, 2] , _snake_case : Tuple=[False, False, True] , _snake_case : int=[0.0, 0.0, 0.0] , _snake_case : Union[str, Any]=0.02 , _snake_case : List[str]=1E-12 , _snake_case : str=True , _snake_case : Any=True , _snake_case : Optional[Any]=2 , )->List[str]: '''simple docstring''' __lowerCAmelCase : List[str] = parent __lowerCAmelCase : int = batch_size __lowerCAmelCase : Optional[int] = image_size __lowerCAmelCase : Optional[Any] = patch_sizes __lowerCAmelCase : Tuple = patch_stride __lowerCAmelCase : List[Any] = patch_padding __lowerCAmelCase : Tuple = is_training __lowerCAmelCase : str = use_labels __lowerCAmelCase : List[Any] = num_labels __lowerCAmelCase : int = num_channels __lowerCAmelCase : Tuple = embed_dim __lowerCAmelCase : Optional[int] = num_heads __lowerCAmelCase : Union[str, Any] = stride_kv __lowerCAmelCase : List[Any] = depth __lowerCAmelCase : int = cls_token __lowerCAmelCase : Optional[Any] = attention_drop_rate __lowerCAmelCase : Union[str, Any] = initializer_range __lowerCAmelCase : Any = layer_norm_eps def UpperCAmelCase__ ( self : List[str] )->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase : Optional[int] = None if self.use_labels: # create a random int32 tensor of given shape __lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.num_labels ) __lowerCAmelCase : List[Any] = self.get_config() return config, pixel_values, labels def UpperCAmelCase__ ( self : List[str] )->int: '''simple docstring''' return CvtConfig( image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , ) def UpperCAmelCase__ ( self : List[Any] , _snake_case : int , _snake_case : str , 
_snake_case : Union[str, Any] )->Tuple: '''simple docstring''' __lowerCAmelCase : str = TFCvtModel(config=_snake_case ) __lowerCAmelCase : Optional[Any] = model(_snake_case , training=_snake_case ) __lowerCAmelCase : str = (self.image_size, self.image_size) __lowerCAmelCase , __lowerCAmelCase : Tuple = image_size[0], image_size[1] for i in range(len(self.depth ) ): __lowerCAmelCase : int = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) __lowerCAmelCase : Any = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) ) def UpperCAmelCase__ ( self : Tuple , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[Any] )->Dict: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.num_labels __lowerCAmelCase : Optional[int] = TFCvtForImageClassification(_snake_case ) __lowerCAmelCase : str = model(_snake_case , labels=_snake_case , training=_snake_case ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCAmelCase__ ( self : Tuple )->str: '''simple docstring''' __lowerCAmelCase : Tuple = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs __lowerCAmelCase : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class snake_case_ ( __lowercase ,__lowercase ,unittest.TestCase ): A_ = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else () A_ = ( {'feature-extraction': TFCvtModel, 'image-classification': TFCvtForImageClassification} if is_tf_available() else {} ) A_ = False A_ = False A_ = False A_ = False A_ = False def UpperCAmelCase__ ( self : List[str] )->str: '''simple docstring''' __lowerCAmelCase : Tuple = TFCvtModelTester(self ) __lowerCAmelCase : Optional[Any] = TFCvtConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 ) def UpperCAmelCase__ ( self : Union[str, Any] )->Optional[int]: '''simple docstring''' self.config_tester.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() @unittest.skip(reason="""Cvt does not output attentions""" ) def UpperCAmelCase__ ( self : str )->List[Any]: '''simple docstring''' pass @unittest.skip(reason="""Cvt does not use inputs_embeds""" ) def UpperCAmelCase__ ( self : Union[str, Any] )->List[str]: '''simple docstring''' pass @unittest.skip(reason="""Cvt does not support input and output embeddings""" ) def UpperCAmelCase__ ( self : Tuple )->Optional[int]: '''simple docstring''' pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) def UpperCAmelCase__ ( self : Dict )->Any: '''simple docstring''' super().test_dataset_conversion() @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , ) @slow def UpperCAmelCase__ ( self : Dict )->Dict: '''simple docstring''' 
super().test_keras_fit() @unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" ) def UpperCAmelCase__ ( self : Union[str, Any] )->str: '''simple docstring''' __lowerCAmelCase : Optional[int] = tf.keras.mixed_precision.Policy("""mixed_float16""" ) tf.keras.mixed_precision.set_global_policy(_snake_case ) super().test_keras_fit() tf.keras.mixed_precision.set_global_policy("""float32""" ) def UpperCAmelCase__ ( self : Tuple )->Tuple: '''simple docstring''' __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Union[str, Any] = model_class(_snake_case ) __lowerCAmelCase : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : int = [*signature.parameters.keys()] __lowerCAmelCase : Any = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _snake_case ) def UpperCAmelCase__ ( self : int )->List[str]: '''simple docstring''' def check_hidden_states_output(_snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : Optional[Any] ): __lowerCAmelCase : Any = model_class(_snake_case ) __lowerCAmelCase : Any = model(**self._prepare_for_class(_snake_case , _snake_case ) ) __lowerCAmelCase : Optional[Any] = outputs.hidden_states __lowerCAmelCase : Tuple = len(self.model_tester.depth ) self.assertEqual(len(_snake_case ) , _snake_case ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:] ) , [ self.model_tester.embed_dim[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ] , ) __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : str = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __lowerCAmelCase : Optional[Any] = True check_hidden_states_output(_snake_case , _snake_case , _snake_case ) def UpperCAmelCase__ ( self : str )->List[str]: '''simple docstring''' __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_snake_case ) def UpperCAmelCase__ ( self : Dict )->List[str]: '''simple docstring''' __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_snake_case ) @slow def UpperCAmelCase__ ( self : Dict )->Union[str, Any]: '''simple docstring''' for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : List[Any] = TFCvtModel.from_pretrained(_snake_case ) self.assertIsNotNone(_snake_case ) def _SCREAMING_SNAKE_CASE ( ) -> Tuple: __lowerCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class snake_case_ ( unittest.TestCase ): @cached_property def UpperCAmelCase__ ( self : Dict )->List[Any]: '''simple docstring''' return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def UpperCAmelCase__ ( self : List[str] )->Union[str, Any]: '''simple docstring''' __lowerCAmelCase : Any = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) __lowerCAmelCase : List[Any] = self.default_image_processor __lowerCAmelCase : 
Optional[int] = prepare_img() __lowerCAmelCase : int = image_processor(images=_snake_case , return_tensors="""tf""" ) # forward pass __lowerCAmelCase : Dict = model(**_snake_case ) # verify the logits __lowerCAmelCase : Dict = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , _snake_case ) __lowerCAmelCase : Any = tf.constant([0.9_285, 0.9_015, -0.3_150] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1E-4 ) )
232
0
"""simple docstring""" import importlib.metadata import operator import re import sys from typing import Optional from packaging import version A: Dict = { "<": operator.lt, "<=": operator.le, "==": operator.eq, "!=": operator.ne, ">=": operator.ge, ">": operator.gt, } def _snake_case ( UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int ): if got_ver is None or want_ver is None: raise ValueError( F"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider" F" reinstalling {pkg}." ) if not ops[op](version.parse(UpperCamelCase ) , version.parse(UpperCamelCase ) ): raise ImportError( F"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}" ) def _snake_case ( UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): UpperCAmelCase : Optional[Any] = F"\n{hint}" if hint is not None else """""" # non-versioned check if re.match(R"""^[\w_\-\d]+$""" , UpperCamelCase ): UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = requirement, None, None else: UpperCAmelCase : Optional[Any] = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , UpperCamelCase ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but""" F" got {requirement}" ) UpperCAmelCase , UpperCAmelCase : Dict = match[0] UpperCAmelCase : int = want_full.split(""",""" ) # there could be multiple requirements UpperCAmelCase : Tuple = {} for w in want_range: UpperCAmelCase : Union[str, Any] = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , UpperCamelCase ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,""" F" but got {requirement}" ) UpperCAmelCase , UpperCAmelCase : Tuple = match[0] UpperCAmelCase : Tuple = want_ver if op not in ops: raise ValueError(F"{requirement}: need one of {list(ops.keys() )}, but got {op}" ) # special case if pkg == "python": UpperCAmelCase : Union[str, Any] = """.""".join([str(UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) return # check if any version is installed try: UpperCAmelCase : Dict = importlib.metadata.version(UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( F"The '{requirement}' distribution was not found and is required by this application. {hint}" ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) def _snake_case ( UpperCamelCase : Tuple ): UpperCAmelCase : Optional[Any] = """Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main""" return require_version(UpperCamelCase , UpperCamelCase )
109
"""simple docstring""" from math import pi, sqrt, tan def _snake_case ( UpperCamelCase : float ): if side_length < 0: raise ValueError("""surface_area_cube() only accepts non-negative values""" ) return 6 * side_length**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if length < 0 or breadth < 0 or height < 0: raise ValueError("""surface_area_cuboid() only accepts non-negative values""" ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def _snake_case ( UpperCamelCase : float ): if radius < 0: raise ValueError("""surface_area_sphere() only accepts non-negative values""" ) return 4 * pi * radius**2 def _snake_case ( UpperCamelCase : float ): if radius < 0: raise ValueError("""surface_area_hemisphere() only accepts non-negative values""" ) return 3 * pi * radius**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if radius < 0 or height < 0: raise ValueError("""surface_area_cone() only accepts non-negative values""" ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( """surface_area_conical_frustum() only accepts non-negative values""" ) UpperCAmelCase : Tuple = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if radius < 0 or height < 0: raise ValueError("""surface_area_cylinder() only accepts non-negative values""" ) return 2 * pi * radius * (height + radius) def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if torus_radius < 0 or tube_radius < 0: raise ValueError("""surface_area_torus() only accepts non-negative values""" ) if torus_radius < tube_radius: raise ValueError( """surface_area_torus() does not support spindle or self intersecting tori""" ) return 4 * pow(UpperCamelCase , 2 ) * torus_radius * tube_radius def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if length < 0 or width < 0: raise ValueError("""area_rectangle() only accepts non-negative values""" ) return length * width def _snake_case ( UpperCamelCase : float ): if side_length < 0: raise ValueError("""area_square() only accepts non-negative values""" ) return side_length**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if base < 0 or height < 0: raise ValueError("""area_triangle() only accepts non-negative values""" ) return (base * height) / 2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError("""area_triangle_three_sides() only accepts non-negative values""" ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError("""Given three sides do not form a triangle""" ) UpperCAmelCase : Union[str, Any] = (sidea + sidea + sidea) / 2 UpperCAmelCase : Union[str, Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if base < 0 or height < 0: raise ValueError("""area_parallelogram() only accepts non-negative values""" ) return base * height def _snake_case ( UpperCamelCase : float , UpperCamelCase : float , UpperCamelCase : float ): if basea < 0 or basea < 0 or height < 0: raise 
ValueError("""area_trapezium() only accepts non-negative values""" ) return 1 / 2 * (basea + basea) * height def _snake_case ( UpperCamelCase : float ): if radius < 0: raise ValueError("""area_circle() only accepts non-negative values""" ) return pi * radius**2 def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if radius_x < 0 or radius_y < 0: raise ValueError("""area_ellipse() only accepts non-negative values""" ) return pi * radius_x * radius_y def _snake_case ( UpperCamelCase : float , UpperCamelCase : float ): if diagonal_a < 0 or diagonal_a < 0: raise ValueError("""area_rhombus() only accepts non-negative values""" ) return 1 / 2 * diagonal_a * diagonal_a def _snake_case ( UpperCamelCase : int , UpperCamelCase : float ): if not isinstance(UpperCamelCase , UpperCamelCase ) or sides < 3: raise ValueError( """area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides""" ) elif length < 0: raise ValueError( """area_reg_polygon() only accepts non-negative values as \ length of a side""" ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print("[DEMO] Areas of various geometric shapes: \n") print(f"""Rectangle: {area_rectangle(1_0, 2_0) = }""") print(f"""Square: {area_square(1_0) = }""") print(f"""Triangle: {area_triangle(1_0, 1_0) = }""") print(f"""Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }""") print(f"""Parallelogram: {area_parallelogram(1_0, 2_0) = }""") print(f"""Rhombus: {area_rhombus(1_0, 2_0) = }""") print(f"""Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }""") print(f"""Circle: {area_circle(2_0) = }""") print(f"""Ellipse: {area_ellipse(1_0, 2_0) = }""") print("\nSurface Areas of various geometric shapes: \n") print(f"""Cube: {surface_area_cube(2_0) = }""") print(f"""Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }""") print(f"""Sphere: {surface_area_sphere(2_0) = }""") print(f"""Hemisphere: {surface_area_hemisphere(2_0) = }""") print(f"""Cone: {surface_area_cone(1_0, 2_0) = }""") print(f"""Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }""") print(f"""Cylinder: {surface_area_cylinder(1_0, 2_0) = }""") print(f"""Torus: {surface_area_torus(2_0, 1_0) = }""") print(f"""Equilateral Triangle: {area_reg_polygon(3, 1_0) = }""") print(f"""Square: {area_reg_polygon(4, 1_0) = }""") print(f"""Reqular Pentagon: {area_reg_polygon(5, 1_0) = }""")
"""simple docstring""" import os import pytest from attr import dataclass __lowercase = '''us-east-1''' # defaults region @dataclass class _lowercase : """simple docstring""" lowercase__ = 42 lowercase__ = '''arn:aws:iam::558105141721:role/sagemaker_execution_role''' lowercase__ = { '''task_name''': '''mnli''', '''per_device_train_batch_size''': 16, '''per_device_eval_batch_size''': 16, '''do_train''': True, '''do_eval''': True, '''do_predict''': True, '''output_dir''': '''/opt/ml/model''', '''overwrite_output_dir''': True, '''max_steps''': 5_00, '''save_steps''': 55_00, } lowercase__ = {**hyperparameters, '''max_steps''': 10_00} @property def UpperCAmelCase_ ( self : List[Any] ) -> str: '''simple docstring''' if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def UpperCAmelCase_ ( self : Dict ) -> str: '''simple docstring''' return f"""{self.framework}-transfromers-test""" @property def UpperCAmelCase_ ( self : Dict ) -> str: '''simple docstring''' return f"""./tests/sagemaker/scripts/{self.framework}""" @property def UpperCAmelCase_ ( self : List[str] ) -> str: '''simple docstring''' if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='''class''' ) def lowerCAmelCase (__UpperCamelCase : Optional[Any] ): """simple docstring""" __UpperCamelCase =SageMakerTestEnvironment(framework=request.cls.framework )
"""simple docstring""" import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class _lowercase ( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self : int ) -> int: '''simple docstring''' __UpperCamelCase ='''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() __UpperCamelCase =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) __UpperCamelCase ={ '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } __UpperCamelCase ={ '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 16000, '''return_attention_mask''': False, '''do_normalize''': True, } __UpperCamelCase =tempfile.mkdtemp() __UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __UpperCamelCase =os.path.join(self.tmpdirname , UpperCamelCase__ ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + '''\n''' ) # load decoder from hub __UpperCamelCase ='''hf-internal-testing/ngram-beam-search-decoder''' def UpperCAmelCase_ ( self : Tuple , **UpperCamelCase__ : Tuple ) -> List[str]: '''simple docstring''' __UpperCamelCase =self.add_kwargs_tokens_map.copy() kwargs.update(UpperCamelCase__ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] , **UpperCamelCase__ : List[Any] ) -> Any: '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def UpperCAmelCase_ ( self : List[Any] , **UpperCamelCase__ : Union[str, Any] ) -> str: '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **UpperCamelCase__ ) def UpperCAmelCase_ ( self : Union[str, Any] ) -> int: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self : Tuple ) -> Optional[int]: '''simple docstring''' __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) processor.save_pretrained(self.tmpdirname ) __UpperCamelCase =WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) 
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , UpperCamelCase__ ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , UpperCamelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> str: '''simple docstring''' __UpperCamelCase =WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __UpperCamelCase =WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __UpperCamelCase =self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(UpperCamelCase__ , '''include''' ): WavaVecaProcessorWithLM( tokenizer=UpperCamelCase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def UpperCAmelCase_ ( self : List[Any] ) -> Dict: '''simple docstring''' __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) __UpperCamelCase =floats_list((3, 1000) ) __UpperCamelCase =feature_extractor(UpperCamelCase__ , return_tensors='''np''' ) __UpperCamelCase =processor(UpperCamelCase__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCAmelCase_ ( self : List[str] ) -> Dict: '''simple docstring''' __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) __UpperCamelCase ='''This is a test string''' __UpperCamelCase =processor(text=UpperCamelCase__ ) __UpperCamelCase =tokenizer(UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase_ ( self : Union[str, Any] , UpperCamelCase__ : List[str]=(2, 10, 16) , UpperCamelCase__ : Union[str, Any]=77 ) -> int: '''simple docstring''' np.random.seed(UpperCamelCase__ ) return np.random.rand(*UpperCamelCase__ ) def UpperCAmelCase_ ( self : int ) -> Union[str, Any]: '''simple docstring''' __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) __UpperCamelCase =self._get_dummy_logits(shape=(10, 16) , seed=13 ) 
__UpperCamelCase =processor.decode(UpperCamelCase__ ) __UpperCamelCase =decoder.decode_beams(UpperCamelCase__ )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def UpperCAmelCase_ ( self : Dict , UpperCamelCase__ : Optional[Any] ) -> List[str]: '''simple docstring''' __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) __UpperCamelCase =self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __UpperCamelCase =processor.batch_decode(UpperCamelCase__ ) else: with get_context(UpperCamelCase__ ).Pool() as pool: __UpperCamelCase =processor.batch_decode(UpperCamelCase__ , UpperCamelCase__ ) __UpperCamelCase =list(UpperCamelCase__ ) with get_context('''fork''' ).Pool() as p: __UpperCamelCase =decoder.decode_beams_batch(UpperCamelCase__ , UpperCamelCase__ ) __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =[], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(UpperCamelCase__ , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(UpperCamelCase__ , decoded_processor.logit_score ) self.assertListEqual(UpperCamelCase__ , decoded_processor.lm_score ) def UpperCAmelCase_ ( self : Dict ) -> List[str]: '''simple docstring''' __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) __UpperCamelCase =self._get_dummy_logits() __UpperCamelCase =15 __UpperCamelCase =-20.0 __UpperCamelCase =-4.0 __UpperCamelCase =processor.batch_decode( UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , ) __UpperCamelCase =decoded_processor_out.text __UpperCamelCase =list(UpperCamelCase__ ) with get_context('''fork''' ).Pool() as pool: __UpperCamelCase =decoder.decode_beams_batch( UpperCamelCase__ , UpperCamelCase__ , beam_width=UpperCamelCase__ , beam_prune_logp=UpperCamelCase__ , token_min_logp=UpperCamelCase__ , ) __UpperCamelCase =[d[0][0] for d in decoded_decoder_out] __UpperCamelCase =[d[0][2] for d in decoded_decoder_out] __UpperCamelCase =[d[0][3] for d in decoded_decoder_out] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , UpperCamelCase__ ) self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.0_54, -18.4_47] , UpperCamelCase__ , atol=1E-3 ) ) self.assertTrue(np.array_equal(UpperCamelCase__ , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.5_54, -13.94_74] , UpperCamelCase__ , atol=1E-3 ) ) def 
UpperCAmelCase_ ( self : Any ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) __UpperCamelCase =self._get_dummy_logits() __UpperCamelCase =2.0 __UpperCamelCase =5.0 __UpperCamelCase =-20.0 __UpperCamelCase =True __UpperCamelCase =processor.batch_decode( UpperCamelCase__ , alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , ) __UpperCamelCase =decoded_processor_out.text __UpperCamelCase =list(UpperCamelCase__ ) decoder.reset_params( alpha=UpperCamelCase__ , beta=UpperCamelCase__ , unk_score_offset=UpperCamelCase__ , lm_score_boundary=UpperCamelCase__ , ) with get_context('''fork''' ).Pool() as pool: __UpperCamelCase =decoder.decode_beams_batch( UpperCamelCase__ , UpperCamelCase__ , ) __UpperCamelCase =[d[0][0] for d in decoded_decoder_out] self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , UpperCamelCase__ ) __UpperCamelCase =processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , UpperCamelCase__ ) def UpperCAmelCase_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __UpperCamelCase =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCamelCase =processor.decoder.model_container[processor.decoder._model_key] __UpperCamelCase =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __UpperCamelCase =os.listdir(UpperCamelCase__ ) __UpperCamelCase =['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase =snapshot_download('''hf-internal-testing/processor_with_lm''' ) __UpperCamelCase =WavaVecaProcessorWithLM.from_pretrained(UpperCamelCase__ ) __UpperCamelCase =processor.decoder.model_container[processor.decoder._model_key] __UpperCamelCase =Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() __UpperCamelCase =os.listdir(UpperCamelCase__ ) __UpperCamelCase =os.listdir(UpperCamelCase__ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def UpperCAmelCase_ ( self : Optional[int] ) -> List[str]: '''simple docstring''' __UpperCamelCase =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCamelCase =AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCamelCase =floats_list((3, 1000) ) __UpperCamelCase =processor_wavaveca(UpperCamelCase__ , return_tensors='''np''' ) __UpperCamelCase =processor_auto(UpperCamelCase__ , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) __UpperCamelCase =self._get_dummy_logits() __UpperCamelCase =processor_wavaveca.batch_decode(UpperCamelCase__ ) __UpperCamelCase =processor_auto.batch_decode(UpperCamelCase__ ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def UpperCAmelCase_ ( self : List[Any] ) -> int: '''simple docstring''' __UpperCamelCase =self.get_feature_extractor() __UpperCamelCase =self.get_tokenizer() __UpperCamelCase =self.get_decoder() __UpperCamelCase =WavaVecaProcessorWithLM(tokenizer=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , decoder=UpperCamelCase__ ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def UpperCAmelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] ) -> int: '''simple docstring''' __UpperCamelCase =[d[key] for d in offsets] return retrieved_list def UpperCAmelCase_ ( self : Dict ) -> List[str]: '''simple docstring''' __UpperCamelCase =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCamelCase =self._get_dummy_logits()[0] __UpperCamelCase =processor.decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCamelCase =WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) __UpperCamelCase 
=self._get_dummy_logits() __UpperCamelCase =processor.batch_decode(UpperCamelCase__ , output_word_offsets=UpperCamelCase__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def UpperCAmelCase_ ( self : Optional[int] ) -> Tuple: '''simple docstring''' import torch __UpperCamelCase =load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=UpperCamelCase__ ) __UpperCamelCase =ds.cast_column('''audio''' , datasets.Audio(sampling_rate=16000 ) ) __UpperCamelCase =iter(UpperCamelCase__ ) __UpperCamelCase =next(UpperCamelCase__ ) __UpperCamelCase =AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) __UpperCamelCase =WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __UpperCamelCase =processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): __UpperCamelCase =model(UpperCamelCase__ ).logits.cpu().numpy() __UpperCamelCase =processor.decode(logits[0] , output_word_offsets=UpperCamelCase__ ) __UpperCamelCase =model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __UpperCamelCase =[ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] __UpperCamelCase ='''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) , UpperCamelCase__ ) self.assertEqual(''' '''.join(self.get_from_offsets(UpperCamelCase__ , '''word''' ) ) , output.text ) # output times __UpperCamelCase =torch.tensor(self.get_from_offsets(UpperCamelCase__ , '''start_time''' ) ) __UpperCamelCase =torch.tensor(self.get_from_offsets(UpperCamelCase__ , '''end_time''' ) ) # fmt: off __UpperCamelCase =torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] ) __UpperCamelCase =torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] ) # fmt: on self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.01 ) ) self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=0.01 ) )
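A rough sketch of the public API these tests exercise; the logits are fabricated, the checkpoint name is real, and running this would download the model and its LM decoder:

import numpy as np
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
logits = np.random.rand(2, 10, 32)   # fake CTC logits: (batch, time, vocab); real text needs real audio
print(processor.batch_decode(logits).text)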
import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' UpperCamelCase__ : List[Any] = None UpperCamelCase__ : List[str] = None @property def _A ( self ): '''simple docstring''' return self.feat_extract_tester.prepare_feat_extract_dict() def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_A , 'feature_size' ) ) self.assertTrue(hasattr(_A , 'sampling_rate' ) ) self.assertTrue(hasattr(_A , 'padding_value' ) ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common() __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_A ) == len(_A ) for x, y in zip(_A , processed_features[input_name] ) ) ) __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A ) __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) __SCREAMING_SNAKE_CASE = processed_features[input_name] if len(batch_features_input.shape ) < 3: __SCREAMING_SNAKE_CASE = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A ) __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) __SCREAMING_SNAKE_CASE = processed_features[input_name] if len(batch_features_input.shape ) < 3: __SCREAMING_SNAKE_CASE = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_A ) __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} , tensor_type='tf' ) __SCREAMING_SNAKE_CASE = processed_features[input_name] if len(batch_features_input.shape ) < 3: __SCREAMING_SNAKE_CASE = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def _A ( self , _A=False ): '''simple docstring''' def _inputs_have_equal_length(_A ): __SCREAMING_SNAKE_CASE = len(input[0] ) for input_slice in input[1:]: if len(_A ) != length: return False return True def _inputs_are_equal(_A , _A ): if len(_A ) != len(_A ): return False for input_slice_a, input_slice_a in zip(_A , _A ): if not np.allclose(np.asarray(_A ) , np.asarray(_A ) , atol=1e-3 ): return False return True __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) __SCREAMING_SNAKE_CASE = 
self.feat_extract_tester.prepare_inputs_for_common(numpify=_A ) __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} ) __SCREAMING_SNAKE_CASE = self.feat_extract_tester.seq_length_diff __SCREAMING_SNAKE_CASE = self.feat_extract_tester.max_seq_length + pad_diff __SCREAMING_SNAKE_CASE = self.feat_extract_tester.min_seq_length __SCREAMING_SNAKE_CASE = self.feat_extract_tester.batch_size __SCREAMING_SNAKE_CASE = self.feat_extract_tester.feature_size # test padding for List[int] + numpy __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding=_A ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='max_length' , max_length=len(speech_inputs[-1] ) ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' , return_tensors='np' ) __SCREAMING_SNAKE_CASE = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_A ): feat_extract.pad(_A , padding='max_length' )[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=_A , return_tensors='np' ) __SCREAMING_SNAKE_CASE = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_are_equal(_A , _A ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , pad_to_multiple_of=10 ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' , pad_to_multiple_of=10 ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , pad_to_multiple_of=10 , max_length=_A ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , pad_to_multiple_of=10 , max_length=_A , return_tensors='np' , ) __SCREAMING_SNAKE_CASE = input_a[input_name] self.assertTrue(all(len(_A ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_A , _A ) ) __SCREAMING_SNAKE_CASE = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(_A ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct __SCREAMING_SNAKE_CASE = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum 
* (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def _A ( self , _A=False ): '''simple docstring''' def _inputs_have_equal_length(_A ): __SCREAMING_SNAKE_CASE = len(input[0] ) for input_slice in input[1:]: if len(_A ) != length: return False return True def _inputs_are_equal(_A , _A ): if len(_A ) != len(_A ): return False for input_slice_a, input_slice_a in zip(_A , _A ): if not np.allclose(np.asarray(_A ) , np.asarray(_A ) , atol=1e-3 ): return False return True __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common(numpify=_A ) __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} ) # truncate to smallest __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_A ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='max_length' , max_length=len(speech_inputs[0] ) ) __SCREAMING_SNAKE_CASE = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertFalse(_inputs_have_equal_length(_A ) ) # truncate to smallest with np __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_A , ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' ) __SCREAMING_SNAKE_CASE = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_A ) ) # truncate to middle __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_A , return_tensors='np' , ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_A ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' ) __SCREAMING_SNAKE_CASE = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertTrue(_inputs_are_equal(_A , _A ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_A ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(_A ): feat_extract.pad(_A , truncation=_A )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_A ): feat_extract.pad(_A , padding='longest' , truncation=_A )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_A 
): feat_extract.pad(_A , padding='longest' , truncation=_A )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_A ): feat_extract.pad(_A , padding='max_length' , truncation=_A )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy __SCREAMING_SNAKE_CASE = 12 __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , truncation=_A , ) __SCREAMING_SNAKE_CASE = input_a[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_A , ) __SCREAMING_SNAKE_CASE = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of __SCREAMING_SNAKE_CASE = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: __SCREAMING_SNAKE_CASE = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_A ) ) self.assertFalse(_inputs_have_equal_length(_A ) ) def _A ( self ): '''simple docstring''' self._check_padding(numpify=_A ) def _A ( self ): '''simple docstring''' self._check_padding(numpify=_A ) def _A ( self ): '''simple docstring''' self._check_truncation(numpify=_A ) def _A ( self ): '''simple docstring''' self._check_truncation(numpify=_A ) @require_torch def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common() __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} ) __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' , return_tensors='np' )[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) @require_tf def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict ) __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common() __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} ) __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' , return_tensors='np' )[input_name] __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' , return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.feat_extract_dict __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**_A ) __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common() __SCREAMING_SNAKE_CASE = [len(_A ) for x in speech_inputs] __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} ) __SCREAMING_SNAKE_CASE = feat_extract.pad(_A , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , _A ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _A ) def _A ( self ): '''simple docstring''' 
__SCREAMING_SNAKE_CASE = self.feat_extract_dict __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = self.feature_extraction_class(**_A ) __SCREAMING_SNAKE_CASE = self.feat_extract_tester.prepare_inputs_for_common() __SCREAMING_SNAKE_CASE = [len(_A ) for x in speech_inputs] __SCREAMING_SNAKE_CASE = feat_extract.model_input_names[0] __SCREAMING_SNAKE_CASE = BatchFeature({input_name: speech_inputs} ) __SCREAMING_SNAKE_CASE = min(_A ) __SCREAMING_SNAKE_CASE = feat_extract.pad( _A , padding='max_length' , max_length=_A , truncation=_A , return_tensors='np' ) self.assertIn('attention_mask' , _A ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
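A minimal sketch of the pad() behavior checked above, using a concrete feature extractor; the ragged inputs are fabricated:

from transformers import Wav2Vec2FeatureExtractor

fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
batch = fe([[0.1, 0.2, 0.3], [0.4, 0.5]], sampling_rate=16000)   # ragged inputs
padded = fe.pad(batch, padding="longest", return_tensors="np")
print(padded["input_values"].shape)   # (2, 3): the shorter input is padded with 0.0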
import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = dataset __SCREAMING_SNAKE_CASE = process __SCREAMING_SNAKE_CASE = params def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.dataset[i] __SCREAMING_SNAKE_CASE = self.process(_A , **self.params ) return processed class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A , _A=None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = loader __SCREAMING_SNAKE_CASE = infer __SCREAMING_SNAKE_CASE = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = loader_batch_size # Internal bookkeeping __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None def __len__( self ): '''simple docstring''' return len(self.loader ) def __iter__( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = iter(self.loader ) return self def _A ( self ): '''simple docstring''' if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice __SCREAMING_SNAKE_CASE = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) __SCREAMING_SNAKE_CASE = {} for k, element in self._loader_batch_data.items(): if isinstance(_A , _A ): # Convert ModelOutput to tuple first __SCREAMING_SNAKE_CASE = element.to_tuple() if isinstance(element[0] , torch.Tensor ): __SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A ): # Those are stored as lists of tensors so need specific unbatching. if isinstance(element[0] , torch.Tensor ): __SCREAMING_SNAKE_CASE = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __SCREAMING_SNAKE_CASE = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around __SCREAMING_SNAKE_CASE = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __SCREAMING_SNAKE_CASE = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __SCREAMING_SNAKE_CASE = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. 
__SCREAMING_SNAKE_CASE = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 __SCREAMING_SNAKE_CASE = self._loader_batch_data.__class__(_A ) self._loader_batch_index += 1 return result def _A ( self ): '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch __SCREAMING_SNAKE_CASE = next(self.iterator ) __SCREAMING_SNAKE_CASE = self.infer(_A , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(_A , torch.Tensor ): __SCREAMING_SNAKE_CASE = processed else: __SCREAMING_SNAKE_CASE = list(processed.keys() )[0] __SCREAMING_SNAKE_CASE = processed[key] if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE = len(_A ) else: __SCREAMING_SNAKE_CASE = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __SCREAMING_SNAKE_CASE = observed_batch_size # Setting internal index to unwrap the batch __SCREAMING_SNAKE_CASE = processed __SCREAMING_SNAKE_CASE = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A , _A=None ): '''simple docstring''' super().__init__(_A , _A , _A ) def __iter__( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = iter(self.loader ) __SCREAMING_SNAKE_CASE = None return self def _A ( self ): '''simple docstring''' if self.subiterator is None: __SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item __SCREAMING_SNAKE_CASE = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. # # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators __SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params ) __SCREAMING_SNAKE_CASE = next(self.subiterator ) return processed class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __iter__( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = iter(self.loader ) return self def _A ( self ): '''simple docstring''' __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: __SCREAMING_SNAKE_CASE = self.loader_batch_item() __SCREAMING_SNAKE_CASE = item.pop('is_last' ) accumulator.append(_A ) if is_last: return accumulator while not is_last: __SCREAMING_SNAKE_CASE = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(_A , torch.Tensor ): __SCREAMING_SNAKE_CASE = processed else: __SCREAMING_SNAKE_CASE = list(processed.keys() )[0] __SCREAMING_SNAKE_CASE = processed[key] if isinstance(_A , _A ): __SCREAMING_SNAKE_CASE = len(_A ) else: __SCREAMING_SNAKE_CASE = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. 
__SCREAMING_SNAKE_CASE = observed_batch_size __SCREAMING_SNAKE_CASE = processed __SCREAMING_SNAKE_CASE = 0 while self._loader_batch_index < self.loader_batch_size: __SCREAMING_SNAKE_CASE = self.loader_batch_item() __SCREAMING_SNAKE_CASE = item.pop('is_last' ) accumulator.append(_A ) if is_last: return accumulator else: __SCREAMING_SNAKE_CASE = processed __SCREAMING_SNAKE_CASE = item.pop('is_last' ) accumulator.append(_A ) return accumulator class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = dataset __SCREAMING_SNAKE_CASE = key def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self , _A ): '''simple docstring''' return self.dataset[i][self.key] class UpperCAmelCase_ ( UpperCamelCase_ ): '''simple docstring''' def __init__( self , _A , _A , _A ): '''simple docstring''' __SCREAMING_SNAKE_CASE = dataset __SCREAMING_SNAKE_CASE = keya __SCREAMING_SNAKE_CASE = keya def __len__( self ): '''simple docstring''' return len(self.dataset ) def __getitem__( self , _A ): '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
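Note that the renaming above collapses several distinct classes into one name; in the upstream transformers source these are PipelineDataset, PipelineIterator, PipelineChunkIterator, PipelinePackIterator, KeyDataset, and KeyPairDataset. A sketch of the usual KeyDataset pattern (real APIs; the dataset is just an example):

from datasets import load_dataset
from transformers import pipeline
from transformers.pipelines.pt_utils import KeyDataset

clf = pipeline("sentiment-analysis")
ds = load_dataset("imdb", split="test")
for out in clf(KeyDataset(ds, "text"), batch_size=8):   # streams one column through the pipeline
    print(out)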
"""Convert a ParlAI Blenderbot checkpoint into the Hugging Face format."""

import argparse

import torch

from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
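A hedged sketch of calling the converter above programmatically instead of through argparse; all three paths are placeholders:

convert_parlai_checkpoint(
    checkpoint_path="blenderbot-model.bin",        # placeholder ParlAI checkpoint
    pytorch_dump_folder_path="hf_blenderbot",      # placeholder output directory
    config_json_path="blenderbot-3b-config.json",  # placeholder config file
)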
"""Prim's algorithm for minimum spanning trees, in list and heap variants."""

import heapq as hq
import math
from collections.abc import Iterator


class Vertex:
    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex: distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
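A tiny worked example for the functions above: a triangle with edge weights 1, 2, 3, whose minimum spanning tree keeps the two cheapest edges:

graph = [Vertex(x) for x in range(3)]   # ids "0", "1", "2"
connect(graph, 1, 2, 1)                 # connect() takes 1-indexed vertex numbers
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 3)
print(prim(graph, graph[0]))            # -> [(2, 1), (3, 2)]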
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
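From a caller's perspective the lazy module above is transparent; a sketch with a real class and checkpoint name (the model is downloaded on first use):

from transformers import XLMTokenizer   # resolved lazily through _LazyModule

tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
print(tokenizer("Hello world")["input_ids"])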
import os import tempfile import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from torch import nn from transformers import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_inverse_sqrt_schedule, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) def lowerCAmelCase__ ( a__: List[Any] , a__: Union[str, Any]=1_0 ) -> Any: '''simple docstring''' _UpperCAmelCase = [] for _ in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() return lrs def lowerCAmelCase__ ( a__: List[str] , a__: Any=1_0 ) -> List[Any]: '''simple docstring''' _UpperCAmelCase = [] for step in range(a__ ): lrs.append(scheduler.get_lr()[0] ) scheduler.step() if step == num_steps // 2: with tempfile.TemporaryDirectory() as tmpdirname: _UpperCAmelCase = os.path.join(a__ , 'schedule.bin' ) torch.save(scheduler.state_dict() , a__ ) _UpperCAmelCase = torch.load(a__ ) scheduler.load_state_dict(a__ ) return lrs @require_torch class __a ( unittest.TestCase ): def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> List[Any]: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 ) for _ in range(100 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = torch.tensor([0.1, -0.2, -0.1] , requires_grad=_SCREAMING_SNAKE_CASE ) _UpperCAmelCase = torch.tensor([0.4, 0.2, -0.5] ) _UpperCAmelCase = nn.MSELoss() # No warmup, constant schedule, no gradient clipping _UpperCAmelCase = Adafactor( params=[w] , lr=1e-2 , eps=(1e-3_0, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=_SCREAMING_SNAKE_CASE , weight_decay=0.0 , relative_step=_SCREAMING_SNAKE_CASE , scale_parameter=_SCREAMING_SNAKE_CASE , warmup_init=_SCREAMING_SNAKE_CASE , ) for _ in range(1000 ): _UpperCAmelCase = criterion(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) loss.backward() optimizer.step() w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves. 
w.grad.zero_() self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 ) @require_torch class __a ( unittest.TestCase ): _a : Dict = nn.Linear(50 , 50 ) if is_torch_available() else None _a : Dict = AdamW(m.parameters() , lr=1_0.0 ) if is_torch_available() else None _a : List[Any] = 10 def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: """simple docstring""" self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) ) for a, b in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assertAlmostEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , delta=_SCREAMING_SNAKE_CASE , msg=_SCREAMING_SNAKE_CASE ) def UpperCAmelCase__ ( self ) -> Dict: """simple docstring""" _UpperCAmelCase = {'num_warmup_steps': 2, 'num_training_steps': 10} # schedulers doct format # function: (sched_args_dict, expected_learning_rates) _UpperCAmelCase = { get_constant_schedule: ({}, [10.0] * self.num_steps), get_constant_schedule_with_warmup: ( {'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0], ), get_linear_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25], ), get_cosine_schedule_with_warmup: ( {**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38], ), get_cosine_with_hard_restarts_schedule_with_warmup: ( {**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46], ), get_polynomial_decay_schedule_with_warmup: ( {**common_kwargs, 'power': 2.0, 'lr_end': 1e-7}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156], ), get_inverse_sqrt_schedule: ( {'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714], ), } for scheduler_func, data in scheds.items(): _UpperCAmelCase , _UpperCAmelCase = data _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 ) _UpperCAmelCase = unwrap_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListAlmostEqual( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tol=1e-2 , msg=f'''failed for {scheduler_func} in normal scheduler''' , ) _UpperCAmelCase = scheduler_func(self.optimizer , **_SCREAMING_SNAKE_CASE ) if scheduler_func.__name__ != "get_constant_schedule": LambdaScheduleWrapper.wrap_scheduler(_SCREAMING_SNAKE_CASE ) # wrap to test picklability of the schedule _UpperCAmelCase = unwrap_and_save_reload_schedule(_SCREAMING_SNAKE_CASE , self.num_steps ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , msg=f'''failed for {scheduler_func} in save and reload''' ) class __a : def __init__( self , _SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" _UpperCAmelCase = fn def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" return self.fn(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _UpperCAmelCase = list(map(self , scheduler.lr_lambdas ) )
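# A short usage sketch of the schedule helpers exercised by the tests above
# (assumes `torch` and `transformers`; the model and step counts are illustrative).
import torch
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

for step in range(10):
    optimizer.step()      # optimizer first, then scheduler (PyTorch >= 1.1 ordering)
    scheduler.step()
    print(step, scheduler.get_last_lr()[0])  # ramps up for 2 steps, then decays linearly to 0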
329
0
import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer lowercase__ : Dict = logging.get_logger(__name__) lowercase__ : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart lowercase__ : List[str] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } lowercase__ : Any = { "facebook/bart-base": 1_024, "facebook/bart-large": 1_024, "facebook/bart-large-mnli": 1_024, "facebook/bart-large-cnn": 1_024, "facebook/bart-large-xsum": 1_024, "yjernite/bart_eli5": 1_024, } class a__ ( UpperCamelCase__ ): a : int = VOCAB_FILES_NAMES a : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a : Union[str, Any] = ["""input_ids""", """attention_mask"""] a : List[str] = BartTokenizer def __init__( self , A=None , A=None , A=None , A="replace" , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=False , A=True , **A , ) -> Any: '''simple docstring''' super().__init__( A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , ) a = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , A ) != add_prefix_space: a = getattr(A , pre_tok_state.pop("type" ) ) a = add_prefix_space a = pre_tok_class(**A ) a = add_prefix_space # the pre_tokenizer is already 
updated in the GPT2TokenizerFast `__init__` a = "post_processor" a = getattr(self.backend_tokenizer , A , A ) if tokenizer_component_instance: a = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: a = tuple(state["sep"] ) if "cls" in state: a = tuple(state["cls"] ) a = False if state.get("add_prefix_space" , A ) != add_prefix_space: a = add_prefix_space a = True if state.get("trim_offsets" , A ) != trim_offsets: a = trim_offsets a = True if changes_to_apply: a = getattr(A , state.pop("type" ) ) a = component_class(**A ) setattr(self.backend_tokenizer , A , A ) @property def lowerCAmelCase_ ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def lowerCAmelCase_ ( self , A ) -> List[Any]: '''simple docstring''' a = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value a = value def lowerCAmelCase_ ( self , *A , **A ) -> BatchEncoding: '''simple docstring''' a = kwargs.get("is_split_into_words" , A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A , **A ) def lowerCAmelCase_ ( self , *A , **A ) -> BatchEncoding: '''simple docstring''' a = kwargs.get("is_split_into_words" , A ) if is_split_into_words and not self.add_prefix_space: raise ValueError( F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*A , **A ) def lowerCAmelCase_ ( self , A , A = None ) -> Tuple[str]: '''simple docstring''' a = self._tokenizer.model.save(A , name=A ) return tuple(A ) def lowerCAmelCase_ ( self , A , A=None ) -> int: '''simple docstring''' a = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def lowerCAmelCase_ ( self , A , A = None ) -> List[int]: '''simple docstring''' a = [self.sep_token_id] a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
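# Hedged usage sketch for the fast BART tokenizer defined above (requires the
# `transformers` package and network access to fetch the pretrained files).
from transformers import BartTokenizerFast

tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
encoded = tokenizer("Hello world", return_tensors="pt")
print(encoded["input_ids"])  # ids for "<s> Hello world </s>"
print(tokenizer.mask_token)  # "<mask>"; lstrip=True lets it absorb the preceding space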
180
__all__ = [
    "DownloadConfig",
    "DownloadManager",
    "DownloadMode",
    "StreamingDownloadManager",
]

from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
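# Hedged usage sketch for the exported download utilities (assumes the
# `datasets` package; the URL and cache directory are placeholders).
from datasets import DownloadConfig, DownloadManager

dl_manager = DownloadManager(download_config=DownloadConfig(cache_dir="./hf_cache"))
local_path = dl_manager.download("https://example.com/data.jsonl")  # returns a cached local path
print(local_path)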
180
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                f"`rope_scaling` must be a dictionary with two fields, `type` and `factor`, got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
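# Illustrative construction of the config above, including a `rope_scaling`
# dict that passes `_rope_scaling_validation` (the factor value is made up).
config = GPTNeoXConfig(
    hidden_size=512,
    num_hidden_layers=4,
    num_attention_heads=8,
    rope_scaling={"type": "linear", "factor": 2.0},
)
assert config.hidden_size % config.num_attention_heads == 0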
232
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' , 'False')) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , ) @pytest.mark.usefixtures('sm_env') @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ]) class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def _lowerCamelCase ( self :List[Any] ) -> Any: if self.framework == "pytorch": subprocess.run( f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding="utf-8" , check=a , ) assert hasattr(self , "env" ) def _lowerCamelCase ( self :Any , a :Optional[Any] ) -> Dict: __UpperCamelCase : str = f'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings __UpperCamelCase : Optional[int] = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=a , instance_count=a , instance_type=self.instance_type , debugger_hook_config=a , hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=a , py_version="py36" , ) def _lowerCamelCase ( self :Dict , a :Dict ) -> Optional[int]: TrainingJobAnalytics(a ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def _lowerCamelCase ( self :Dict , a :Tuple ) -> List[Any]: # create estimator __UpperCamelCase : int = self.create_estimator(a ) # run training estimator.fit() # result dataframe __UpperCamelCase : Optional[int] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __UpperCamelCase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] ) __UpperCamelCase : Tuple = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __UpperCamelCase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 9_9_9_9_9_9 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy ) assert all(t <= self.results["eval_loss"] for t in eval_loss ) # dump tests result into json file to share in PR with open(f'{estimator.latest_training_job.name}.json' , 
"w" ) as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , a )
232
1
import pytest _UpperCAmelCase : List[Any] = "__dummy_dataset1__" _UpperCAmelCase : Union[str, Any] = "\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n" @pytest.fixture def UpperCAmelCase__ ( ): return DATASET_LOADING_SCRIPT_NAME @pytest.fixture def UpperCAmelCase__ ( ): return DATASET_LOADING_SCRIPT_CODE @pytest.fixture def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ): lowercase :Tuple = dataset_loading_script_name lowercase :Dict = tmp_path / "datasets" / script_name script_dir.mkdir(parents=lowerCamelCase ) lowercase :int = script_dir / F"{script_name}.py" with open(lowerCamelCase, "w" ) as f: f.write(lowerCamelCase ) return str(lowerCamelCase )
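# Sketch of how a test might consume the fixtures above. Note that the three
# fixtures were given one colliding placeholder name by the obfuscation, so the
# names used here (`dataset_loading_script_dir` etc.) are assumptions modeled
# on the datasets test suite.
from datasets import load_dataset


def test_dummy_dataset_loads(dataset_loading_script_dir):
    ds = load_dataset(dataset_loading_script_dir, split="train")
    assert "tokens" in ds.column_names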
158
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class __lowerCAmelCase : def __init__( self: Dict , _lowerCAmelCase: Dict , _lowerCAmelCase: Any=13 , _lowerCAmelCase: List[str]=10 , _lowerCAmelCase: Dict=3 , _lowerCAmelCase: Dict=2 , _lowerCAmelCase: Any=2 , _lowerCAmelCase: Any=2 , _lowerCAmelCase: Union[str, Any]=True , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Optional[Any]=32 , _lowerCAmelCase: Union[str, Any]=5 , _lowerCAmelCase: str=4 , _lowerCAmelCase: str=37 , _lowerCAmelCase: Any="gelu" , _lowerCAmelCase: List[Any]=0.1 , _lowerCAmelCase: Union[str, Any]=0.1 , _lowerCAmelCase: Union[str, Any]=10 , _lowerCAmelCase: List[str]=0.02 , _lowerCAmelCase: Union[str, Any]=0.9 , _lowerCAmelCase: int=None , ): lowercase :Dict = parent lowercase :Optional[int] = batch_size lowercase :List[Any] = image_size lowercase :int = num_channels lowercase :Any = patch_size lowercase :str = tubelet_size lowercase :Optional[Any] = num_frames lowercase :Optional[Any] = is_training lowercase :Tuple = use_labels lowercase :Union[str, Any] = hidden_size lowercase :Any = num_hidden_layers lowercase :Optional[Any] = num_attention_heads lowercase :Optional[int] = intermediate_size lowercase :Union[str, Any] = hidden_act lowercase :int = hidden_dropout_prob lowercase :List[str] = attention_probs_dropout_prob lowercase :List[str] = type_sequence_label_size lowercase :Union[str, Any] = initializer_range lowercase :Optional[Any] = mask_ratio lowercase :List[Any] = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase :List[str] = (image_size // patch_size) ** 2 lowercase :Dict = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase :Optional[Any] = int(mask_ratio * self.seq_length ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase :Tuple = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase :Dict = None if self.use_labels: lowercase :Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase :Any = self.get_config() return config, pixel_values, labels def SCREAMING_SNAKE_CASE ( self: List[str] ): return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , 
attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowerCAmelCase , initializer_range=self.initializer_range , ) def SCREAMING_SNAKE_CASE ( self: Optional[int] , _lowerCAmelCase: Dict , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict ): lowercase :List[Any] = VideoMAEModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() lowercase :int = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: Optional[int] ): lowercase :str = VideoMAEForPreTraining(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase :Tuple = torch.ones((self.num_masks,) ) lowercase :str = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase :Optional[int] = mask.expand(self.batch_size , -1 ).bool() lowercase :List[Any] = model(_lowerCAmelCase , _lowerCAmelCase ) # model only returns predictions for masked patches lowercase :Any = mask.sum().item() lowercase :Optional[Any] = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def SCREAMING_SNAKE_CASE ( self: Optional[int] ): lowercase :Union[str, Any] = self.prepare_config_and_inputs() lowercase , lowercase , lowercase :str = config_and_inputs lowercase :Tuple = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase): _a = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) _a = ( {'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification} if is_torch_available() else {} ) _a = False _a = False _a = False _a = False def SCREAMING_SNAKE_CASE ( self: Dict ): lowercase :str = VideoMAEModelTester(self ) lowercase :str = ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 ) def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: str , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Optional[Any]=False ): lowercase :Union[str, Any] = copy.deepcopy(_lowerCAmelCase ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase :Tuple = torch.ones((self.model_tester.num_masks,) ) lowercase :Optional[Any] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase :List[Any] = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase :Optional[int] = bool_masked_pos.to(_lowerCAmelCase ) if return_labels: if model_class in [ *get_values(_lowerCAmelCase ), ]: lowercase :List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_lowerCAmelCase ) return inputs_dict def SCREAMING_SNAKE_CASE ( self: Tuple ): self.config_tester.run_common_tests() @unittest.skip(reason="VideoMAE does not use inputs_embeds" ) def SCREAMING_SNAKE_CASE ( self: str ): pass def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): lowercase , lowercase :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: 
lowercase :Union[str, Any] = model_class(_lowerCAmelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase :List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): lowercase , lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase :Tuple = model_class(_lowerCAmelCase ) lowercase :Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase :Optional[int] = [*signature.parameters.keys()] lowercase :List[Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , _lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: List[str] ): lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: int ): lowercase :int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) @slow def SCREAMING_SNAKE_CASE ( self: Any ): for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase :int = VideoMAEModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self: str ): if not self.has_attentions: pass else: lowercase , lowercase :Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase :Optional[Any] = True for model_class in self.all_model_classes: lowercase :Tuple = self.model_tester.seq_length - self.model_tester.num_masks lowercase :Dict = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase :Any = True lowercase :Tuple = False lowercase :str = True lowercase :List[Any] = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :List[Any] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase :Any = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase :Optional[Any] = True lowercase :str = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :Any = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase :List[Any] = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase :int = len(_lowerCAmelCase ) # Check attention is always last and order is fine lowercase :int = True lowercase :Union[str, Any] = True lowercase :int = model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :Optional[int] = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) self.assertEqual(out_len + 1 , len(_lowerCAmelCase ) ) lowercase :Tuple = outputs.attentions self.assertEqual(len(_lowerCAmelCase ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def SCREAMING_SNAKE_CASE ( self: str ): def check_hidden_states_output(_lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Tuple ): lowercase :Dict = 
model_class(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() with torch.no_grad(): lowercase :Tuple = model(**self._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase ) ) lowercase :Tuple = outputs.hidden_states lowercase :Any = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(_lowerCAmelCase ) , _lowerCAmelCase ) lowercase :str = self.model_tester.seq_length - self.model_tester.num_masks lowercase :List[str] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase , lowercase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase :Any = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase :Optional[Any] = True check_hidden_states_output(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def SCREAMING_SNAKE_CASE ( self: List[Any] ): pass def UpperCAmelCase__ ( ): lowercase :str = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) lowercase :List[str] = np.load(lowerCamelCase ) return list(lowerCamelCase ) @require_torch @require_vision class __lowerCAmelCase ( unittest.TestCase): @cached_property def SCREAMING_SNAKE_CASE ( self: Dict ): # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def SCREAMING_SNAKE_CASE ( self: Any ): lowercase :Tuple = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics" ).to( _lowerCAmelCase ) lowercase :Tuple = self.default_image_processor lowercase :Optional[Any] = prepare_video() lowercase :str = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase :List[str] = model(**_lowerCAmelCase ) # verify the logits lowercase :Optional[int] = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) lowercase :Optional[int] = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1e-4 ) ) @slow def SCREAMING_SNAKE_CASE ( self: Tuple ): lowercase :List[str] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" ).to(_lowerCAmelCase ) lowercase :List[Any] = self.default_image_processor lowercase :str = prepare_video() lowercase :Optional[int] = image_processor(_lowerCAmelCase , return_tensors="pt" ).to(_lowerCAmelCase ) # add boolean mask, indicating which patches to mask lowercase :Optional[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" ) lowercase :str = torch.load(_lowerCAmelCase ) # forward pass with torch.no_grad(): lowercase :Optional[Any] = model(**_lowerCAmelCase ) # verify the logits lowercase :str = torch.Size([1, 14_08, 15_36] ) lowercase :Union[str, Any] = torch.tensor( [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=_lowerCAmelCase ) self.assertEqual(outputs.logits.shape , _lowerCAmelCase ) 
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _lowerCAmelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) lowercase :Union[str, Any] = torch.tensor([0.51_42] , device=_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase :Any = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=_lowerCAmelCase ).to( _lowerCAmelCase ) with torch.no_grad(): lowercase :List[str] = model(**_lowerCAmelCase ) lowercase :Tuple = torch.tensor(torch.tensor([0.64_69] ) , device=_lowerCAmelCase ) self.assertTrue(torch.allclose(outputs.loss , _lowerCAmelCase , atol=1e-4 ) )
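# Minimal inference sketch matching the integration tests above (the checkpoint
# id comes from the tests; 16 random frames stand in for a real video clip).
import numpy as np
import torch
from transformers import VideoMAEForVideoClassification, VideoMAEImageProcessor

video = list(np.random.rand(16, 360, 640, 3))  # placeholder frames
processor = VideoMAEImageProcessor()
model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])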
158
1
import ctypes
import os
import sys
from contextlib import contextmanager


# Windows only
if os.name == "nt":
    import msvcrt  # noqa


class CursorInfo(ctypes.Structure):
    # _fields_ is a specific attr expected by ctypes
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    """Hide the terminal cursor on Windows or POSIX."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    """Restore the terminal cursor on Windows or POSIX."""
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager that hides the cursor and restores it on exit."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
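# Usage sketch: hide the cursor while redrawing a status line; `hide()` restores
# it even if the body raises.
import time

with hide():
    for i in range(3):
        print(f"working... {i}", end="\r", flush=True)
        time.sleep(0.2)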
205
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
205
1
import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConformerConfig, WavaVecaConformerForCTC, WavaVecaConformerForPreTraining, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : List[str] = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''', '''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''', '''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''', '''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''', '''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''', '''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''', '''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''', '''self_attn.rotary_emb''': '''encoder.embed_positions''', '''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''', '''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''', '''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''', '''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''', '''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''', '''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''', '''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''', '''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''', '''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''', '''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''', '''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''', '''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } _lowerCAmelCase : int = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def __snake_case ( _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Dict ) -> Union[str, Any]: for attribute in key.split("." ): A_ : int = getattr(_lowerCAmelCase , _lowerCAmelCase ) if weight_type is not None: A_ : int = getattr(_lowerCAmelCase , _lowerCAmelCase ).shape else: A_ : int = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f"Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": A_ : int = value elif weight_type == "weight_g": A_ : Dict = value elif weight_type == "weight_v": A_ : List[str] = value elif weight_type == "bias": A_ : int = value elif weight_type == "running_mean": A_ : Union[str, Any] = value elif weight_type == "running_var": A_ : int = value elif weight_type == "num_batches_tracked": A_ : Optional[Any] = value elif weight_type == "inv_freq": A_ : List[Any] = value else: A_ : Optional[int] = value logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __snake_case ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] ) -> Optional[Any]: A_ : str = [] A_ : int = fairseq_model.state_dict() A_ : Tuple = hf_model.wavaveca_conformer.feature_extractor for name, value in fairseq_dict.items(): A_ : str = False if "conv_layers" in name: load_conv_layer( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , ) A_ : Any = True else: for key, mapped_key in MAPPING.items(): A_ : Optional[int] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: A_ : Any = True if "*" in mapped_key: A_ : Tuple = name.split(_lowerCAmelCase )[0].split("." )[-2] A_ : Union[str, Any] = mapped_key.replace("*" , _lowerCAmelCase ) if "pos_bias_u" in name: A_ : Optional[Any] = None elif "pos_bias_v" in name: A_ : int = None elif "weight_g" in name: A_ : Optional[Any] = "weight_g" elif "weight_v" in name: A_ : Tuple = "weight_v" elif "bias" in name: A_ : List[str] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj A_ : List[Any] = "weight" elif "running_mean" in name: A_ : Any = "running_mean" elif "inv_freq" in name: A_ : List[str] = "inv_freq" elif "running_var" in name: A_ : List[str] = "running_var" elif "num_batches_tracked" in name: A_ : Any = "num_batches_tracked" else: A_ : Tuple = None set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) continue if not is_used: unused_weights.append(_lowerCAmelCase ) logger.warning(f"Unused weights: {unused_weights}" ) def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ) -> Any: A_ : Optional[int] = full_name.split("conv_layers." )[-1] A_ : Dict = name.split("." ) A_ : str = int(items[0] ) A_ : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) A_ : Optional[Any] = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) A_ : Dict = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." 
) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." ) A_ : List[str] = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." ) A_ : Any = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) else: unused_weights.append(_lowerCAmelCase ) @torch.no_grad() def __snake_case ( _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Union[str, Any]=True ) -> Optional[int]: if config_path is not None: A_ : int = WavaVecaConformerConfig.from_pretrained(_lowerCAmelCase , hidden_act="swish" ) else: A_ : int = WavaVecaConformerConfig() if "rope" in checkpoint_path: A_ : str = "rotary" if is_finetuned: if dict_path: A_ : Optional[int] = Dictionary.load(_lowerCAmelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq A_ : Optional[int] = target_dict.pad_index A_ : Union[str, Any] = target_dict.bos_index A_ : Any = target_dict.eos_index A_ : str = len(target_dict.symbols ) A_ : List[str] = os.path.join(_lowerCAmelCase , "vocab.json" ) if not os.path.isdir(_lowerCAmelCase ): logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCAmelCase ) ) return os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase ) A_ : List[Any] = target_dict.indices # fairseq has the <pad> and <s> switched A_ : Dict = 0 A_ : List[Any] = 1 with open(_lowerCAmelCase , "w" , encoding="utf-8" ) as vocab_handle: json.dump(_lowerCAmelCase , _lowerCAmelCase ) A_ : Tuple = WavaVecaCTCTokenizer( _lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCAmelCase , ) A_ : str = True if config.feat_extract_norm == "layer" else False A_ : Union[str, Any] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , ) A_ : Tuple = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase ) processor.save_pretrained(_lowerCAmelCase ) A_ : List[str] = WavaVecaConformerForCTC(_lowerCAmelCase ) else: A_ : Any = WavaVecaConformerForPreTraining(_lowerCAmelCase ) if is_finetuned: A_ , A_ , A_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) else: A_ : Optional[int] = argparse.Namespace(task="audio_pretraining" ) A_ : Dict = fairseq.tasks.setup_task(_lowerCAmelCase ) A_ , A_ , A_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCAmelCase ) A_ : Optional[Any] = model[0].eval() recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , not is_finetuned ) hf_wavavec.save_pretrained(_lowerCAmelCase ) if __name__ == 
"__main__": _lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument( '''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not''' ) _lowerCAmelCase : str = parser.parse_args() convert_wavaveca_conformer_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
70
from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the largest sum over all (not necessarily contiguous) subsequences."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best so far, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
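# Worked example of the recurrence `ans = max(ans, ans + num, num)`: for a
# *subsequence* (elements need not be contiguous) the optimum is the sum of the
# positive terms, or the single largest element when every term is negative.
assert max_subsequence_sum([3, -5, 4]) == 7      # picks 3 and 4, skips -5
assert max_subsequence_sum([-7, -3, -2]) == -2   # best single element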
70
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class UpperCamelCase ( lowercase ): UpperCAmelCase : UNetaDModel UpperCAmelCase : ScoreSdeVeScheduler def __init__(self : Optional[int] , _A : UNetaDModel , _A : ScoreSdeVeScheduler) -> str: super().__init__() self.register_modules(unet=_A , scheduler=_A) @torch.no_grad() def __call__(self : Optional[int] , _A : int = 1 , _A : int = 20_00 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[str] = "pil" , _A : bool = True , **_A : int , ) -> Union[ImagePipelineOutput, Tuple]: __snake_case : Any = self.unet.config.sample_size __snake_case : List[Any] = (batch_size, 3, img_size, img_size) __snake_case : Dict = self.unet __snake_case : Tuple = randn_tensor(_A , generator=_A) * self.scheduler.init_noise_sigma __snake_case : List[str] = sample.to(self.device) self.scheduler.set_timesteps(_A) self.scheduler.set_sigmas(_A) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): __snake_case : Dict = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device) # correction step for _ in range(self.scheduler.config.correct_steps): __snake_case : Union[str, Any] = self.unet(_A , _A).sample __snake_case : Union[str, Any] = self.scheduler.step_correct(_A , _A , generator=_A).prev_sample # prediction step __snake_case : Any = model(_A , _A).sample __snake_case : str = self.scheduler.step_pred(_A , _A , _A , generator=_A) __snake_case , __snake_case : Optional[Any] = output.prev_sample, output.prev_sample_mean __snake_case : Tuple = sample_mean.clamp(0 , 1) __snake_case : Union[str, Any] = sample.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": __snake_case : Any = self.numpy_to_pil(_A) if not return_dict: return (sample,) return ImagePipelineOutput(images=_A)
172
"""simple docstring""" _a : Tuple= 8.3_1_4_4_5_9_8 def __UpperCAmelCase ( UpperCAmelCase_ : float , UpperCAmelCase_ : float ) -> float: '''simple docstring''' if temperature < 0: raise Exception('Temperature cannot be less than 0 K' ) if molar_mass <= 0: raise Exception('Molar mass cannot be less than or equal to 0 kg/mol' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example _a : Any= 300 _a : Optional[Any]= 28 _a : Optional[int]= rms_speed_of_molecule(temperature, molar_mass) print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
172
1
from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig lowerCamelCase : List[str] = logging.get_logger(__name__) # General docstring lowerCamelCase : List[Any] = '''MobileNetV1Config''' # Base docstring lowerCamelCase : Optional[int] = '''google/mobilenet_v1_1.0_224''' lowerCamelCase : Optional[Any] = [1, 10_24, 7, 7] # Image classification docstring lowerCamelCase : Any = '''google/mobilenet_v1_1.0_224''' lowerCamelCase : Optional[Any] = '''tabby, tabby cat''' lowerCamelCase : Tuple = [ '''google/mobilenet_v1_1.0_224''', '''google/mobilenet_v1_0.75_192''', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def snake_case_ ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[int]=None ): __lowercase : List[Any] = {} if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : Any = model.mobilenet_va else: __lowercase : Tuple = model __lowercase : str = """MobilenetV1/Conv2d_0/""" __lowercase : List[Any] = backbone.conv_stem.convolution.weight __lowercase : List[Any] = backbone.conv_stem.normalization.bias __lowercase : Optional[Any] = backbone.conv_stem.normalization.weight __lowercase : List[Any] = backbone.conv_stem.normalization.running_mean __lowercase : Any = backbone.conv_stem.normalization.running_var for i in range(13 ): __lowercase : int = i + 1 __lowercase : Optional[Any] = i * 2 __lowercase : Optional[Any] = backbone.layer[pt_index] __lowercase : Dict = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" __lowercase : Union[str, Any] = pointer.convolution.weight __lowercase : Optional[Any] = pointer.normalization.bias __lowercase : List[str] = pointer.normalization.weight __lowercase : List[str] = pointer.normalization.running_mean __lowercase : List[Any] = pointer.normalization.running_var __lowercase : List[Any] = backbone.layer[pt_index + 1] __lowercase : Any = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" __lowercase : List[str] = pointer.convolution.weight __lowercase : Dict = pointer.normalization.bias __lowercase : str = pointer.normalization.weight __lowercase : int = pointer.normalization.running_mean __lowercase : List[str] = pointer.normalization.running_var if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): __lowercase : List[str] = """MobilenetV1/Logits/Conv2d_1c_1x1/""" __lowercase : Dict = model.classifier.weight __lowercase : Tuple = model.classifier.bias return tf_to_pt_map def snake_case_ ( lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. 
Please see """ """https://www.tensorflow.org/install/ for installation instructions.""" ) raise # Load weights from TF model __lowercase : List[Any] = tf.train.list_variables(lowerCAmelCase_ ) __lowercase : Union[str, Any] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) __lowercase : Dict = tf.train.load_variable(lowerCAmelCase_ , lowerCAmelCase_ ) __lowercase : Dict = array # Build TF to PyTorch weights loading map __lowercase : Union[str, Any] = _build_tf_to_pytorch_map(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue __lowercase : Tuple = tf_weights[name] if "depthwise_weights" in name: logger.info("""Transposing depthwise""" ) __lowercase : Optional[int] = np.transpose(lowerCAmelCase_ , (2, 3, 0, 1) ) elif "weights" in name: logger.info("""Transposing""" ) if len(pointer.shape ) == 2: # copying into linear layer __lowercase : Any = array.squeeze().transpose() else: __lowercase : int = np.transpose(lowerCAmelCase_ , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) __lowercase : Tuple = torch.from_numpy(lowerCAmelCase_ ) tf_weights.pop(lowerCAmelCase_ , lowerCAmelCase_ ) tf_weights.pop(name + """/RMSProp""" , lowerCAmelCase_ ) tf_weights.pop(name + """/RMSProp_1""" , lowerCAmelCase_ ) tf_weights.pop(name + """/ExponentialMovingAverage""" , lowerCAmelCase_ ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def snake_case_ ( lowerCAmelCase_ : torch.Tensor , lowerCAmelCase_ : nn.Convad ): __lowercase , __lowercase : List[str] = features.shape[-2:] __lowercase , __lowercase : Optional[int] = conv_layer.stride __lowercase , __lowercase : Tuple = conv_layer.kernel_size if in_height % stride_height == 0: __lowercase : Dict = max(kernel_height - stride_height , 0 ) else: __lowercase : List[str] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __lowercase : Any = max(kernel_width - stride_width , 0 ) else: __lowercase : Any = max(kernel_width - (in_width % stride_width) , 0 ) __lowercase : int = pad_along_width // 2 __lowercase : List[Any] = pad_along_width - pad_left __lowercase : Dict = pad_along_height // 2 __lowercase : Union[str, Any] = pad_along_height - pad_top __lowercase : Any = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(lowerCAmelCase_ , lowerCAmelCase_ , """constant""" , 0.0 ) class lowerCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self : int , __a : MobileNetVaConfig , __a : int , __a : int , __a : int , __a : Optional[int] = 1 , __a : Optional[int] = 1 , __a : bool = False , __a : Optional[bool] = True , __a : Optional[bool or str] = True , ) -> None: """simple docstring""" super().__init__() __lowercase : str = config if in_channels % groups != 0: raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups." ) if out_channels % groups != 0: raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups." 
) __lowercase : List[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __lowercase : List[str] = nn.Convad( in_channels=__a , out_channels=__a , kernel_size=__a , stride=__a , padding=__a , groups=__a , bias=__a , padding_mode="""zeros""" , ) if use_normalization: __lowercase : Optional[Any] = nn.BatchNormad( num_features=__a , eps=config.layer_norm_eps , momentum=0.9997 , affine=__a , track_running_stats=__a , ) else: __lowercase : Tuple = None if use_activation: if isinstance(__a , __a ): __lowercase : int = ACTaFN[use_activation] elif isinstance(config.hidden_act , __a ): __lowercase : Optional[int] = ACTaFN[config.hidden_act] else: __lowercase : List[str] = config.hidden_act else: __lowercase : Optional[Any] = None def lowerCAmelCase ( self : Union[str, Any] , __a : torch.Tensor ) -> torch.Tensor: """simple docstring""" if self.config.tf_padding: __lowercase : Union[str, Any] = apply_tf_padding(__a , self.convolution ) __lowercase : List[str] = self.convolution(__a ) if self.normalization is not None: __lowercase : Dict = self.normalization(__a ) if self.activation is not None: __lowercase : Union[str, Any] = self.activation(__a ) return features class lowerCAmelCase ( __a ): '''simple docstring''' _A : Optional[int] = MobileNetVaConfig _A : Optional[int] = load_tf_weights_in_mobilenet_va _A : Tuple = '''mobilenet_v1''' _A : Any = '''pixel_values''' _A : List[Any] = False def lowerCAmelCase ( self : Any , __a : Union[nn.Linear, nn.Convad] ) -> None: """simple docstring""" if isinstance(__a , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(__a , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) lowerCamelCase : Any = r''' This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. ''' lowerCamelCase : Any = r''' Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
''' @add_start_docstrings( '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , __a , ) class lowerCAmelCase ( __a ): '''simple docstring''' def __init__( self : Any , __a : MobileNetVaConfig , __a : bool = True ) -> Any: """simple docstring""" super().__init__(__a ) __lowercase : str = config __lowercase : Dict = 32 __lowercase : Optional[Any] = max(int(depth * config.depth_multiplier ) , config.min_depth ) __lowercase : Optional[Any] = MobileNetVaConvLayer( __a , in_channels=config.num_channels , out_channels=__a , kernel_size=3 , stride=2 , ) __lowercase : Optional[int] = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __lowercase : int = nn.ModuleList() for i in range(13 ): __lowercase : List[str] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __lowercase : int = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=3 , stride=strides[i] , groups=__a , ) ) self.layer.append( MobileNetVaConvLayer( __a , in_channels=__a , out_channels=__a , kernel_size=1 , ) ) __lowercase : str = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def lowerCAmelCase ( self : List[str] , __a : Tuple ) -> Any: """simple docstring""" raise NotImplementedError @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=__a , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def lowerCAmelCase ( self : str , __a : Optional[torch.Tensor] = None , __a : Optional[bool] = None , __a : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: """simple docstring""" __lowercase : List[str] = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowercase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("""You have to specify pixel_values""" ) __lowercase : Tuple = self.conv_stem(__a ) __lowercase : Optional[Any] = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __lowercase : Optional[int] = layer_module(__a ) if output_hidden_states: __lowercase : int = all_hidden_states + (hidden_states,) __lowercase : List[str] = hidden_states if self.pooler is not None: __lowercase : List[Any] = torch.flatten(self.pooler(__a ) , start_dim=1 ) else: __lowercase : str = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=__a , pooler_output=__a , hidden_states=__a , ) @add_start_docstrings( ''' MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for ImageNet. 
''' , __a , ) class lowerCAmelCase ( __a ): '''simple docstring''' def __init__( self : str , __a : MobileNetVaConfig ) -> None: """simple docstring""" super().__init__(__a ) __lowercase : List[str] = config.num_labels __lowercase : Optional[int] = MobileNetVaModel(__a ) __lowercase : List[Any] = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __lowercase : Any = nn.Dropout(config.classifier_dropout_prob , inplace=__a ) __lowercase : Tuple = nn.Linear(__a , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(__a ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__a , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def lowerCAmelCase ( self : Any , __a : Optional[torch.Tensor] = None , __a : Optional[bool] = None , __a : Optional[torch.Tensor] = None , __a : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: """simple docstring""" __lowercase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __lowercase : Optional[int] = self.mobilenet_va(__a , output_hidden_states=__a , return_dict=__a ) __lowercase : List[Any] = outputs.pooler_output if return_dict else outputs[1] __lowercase : Tuple = self.classifier(self.dropout(__a ) ) __lowercase : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowercase : List[str] = """regression""" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowercase : List[Any] = """single_label_classification""" else: __lowercase : Tuple = """multi_label_classification""" if self.config.problem_type == "regression": __lowercase : Any = MSELoss() if self.num_labels == 1: __lowercase : Tuple = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowercase : int = loss_fct(__a , __a ) elif self.config.problem_type == "single_label_classification": __lowercase : List[str] = CrossEntropyLoss() __lowercase : List[Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowercase : Dict = BCEWithLogitsLoss() __lowercase : List[str] = loss_fct(__a , __a ) if not return_dict: __lowercase : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=__a , logits=__a , hidden_states=outputs.hidden_states , )
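A minimal inference sketch for the classification head defined above. The checkpoint name and image path are assumptions for illustration; any MobileNetV1 checkpoint with an ImageNet head should work the same way.

# Hedged usage sketch: assumes the public "google/mobilenet_v1_1.0_224" checkpoint
# and a local image file; both are illustrative, not taken from this module.
import torch
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

image = Image.open("example.jpg").convert("RGB")  # hypothetical local file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
# The model config maps class indices back to ImageNet label names.
print(model.config.id2label[logits.argmax(-1).item()])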
def is_pentagonal(n: int) -> bool:
    # A number x is pentagonal iff (1 + sqrt(1 + 24 * x)) / 6 is a positive integer.
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    # Project Euler 44: find the smallest difference D = P_j - P_i such that both
    # the sum and the difference of the pair of pentagonal numbers are pentagonal.
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1


if __name__ == "__main__":
    print(f"{solution() = }")
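Why the integrality test works: solving P(n) = n(3n - 1)/2 = x for n with the quadratic formula gives n = (1 + sqrt(1 + 24x)) / 6, so x is pentagonal exactly when that expression is a positive integer. A quick sanity check, assuming the functions above are in scope:

# First pentagonal numbers: 1, 5, 12, 22, 35, 51, 70, 92, 117, 145
assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35, 51, 70, 92, 117, 145))
assert not any(is_pentagonal(x) for x in (2, 3, 4, 6, 11, 13, 21, 23))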
'''simple docstring''' import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList a_ : Union[str, Any] = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""] class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=1 ): """simple docstring""" lowerCamelCase_ = tokenizer lowerCamelCase_ = dataset lowerCamelCase_ = len(UpperCamelCase ) if n_tasks is None else n_tasks lowerCamelCase_ = n_copies def __iter__( self ): """simple docstring""" lowerCamelCase_ = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() ) lowerCamelCase_ = self.tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="pt" ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class snake_case ( lowercase ): """simple docstring""" def __init__( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ): """simple docstring""" lowerCamelCase_ = start_length lowerCamelCase_ = eof_strings lowerCamelCase_ = tokenizer def __call__( self , UpperCamelCase , UpperCamelCase , **UpperCamelCase ): """simple docstring""" lowerCamelCase_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) lowerCamelCase_ = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(UpperCamelCase ) def __snake_case ( UpperCAmelCase_ : Optional[Any] ): lowerCamelCase_ = re.split("(%s)" % "|".join(UpperCAmelCase_ ) , UpperCAmelCase_ ) # last string should be "" return "".join(string_list[:-2] ) def __snake_case ( UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : str=20 , **UpperCAmelCase_ : Optional[Any] ): lowerCamelCase_ = defaultdict(UpperCAmelCase_ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(UpperCAmelCase_ ) ): with torch.no_grad(): lowerCamelCase_ = batch["ids"].shape[-1] lowerCamelCase_ = accelerator.unwrap_model(UpperCAmelCase_ ).generate( input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=UpperCAmelCase_ , **UpperCAmelCase_ ) # each task is generated batch_size times lowerCamelCase_ = batch["task_id"].repeat(UpperCAmelCase_ ) lowerCamelCase_ = accelerator.pad_across_processes( UpperCAmelCase_ , dim=1 , pad_index=tokenizer.pad_token_id ) lowerCamelCase_ ,lowerCamelCase_ = accelerator.gather((generated_tokens, generated_tasks) ) lowerCamelCase_ = generated_tokens.cpu().numpy() lowerCamelCase_ = generated_tasks.cpu().numpy() for task, generated_tokens in zip(UpperCAmelCase_ , UpperCAmelCase_ ): gen_token_dict[task].append(UpperCAmelCase_ ) lowerCamelCase_ = [[] for _ in range(UpperCAmelCase_ )] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: lowerCamelCase_ = 
tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ ) code_gens[task].append(remove_last_block(UpperCAmelCase_ ) ) return code_gens def __snake_case ( ): # Setup configuration lowerCamelCase_ = HfArgumentParser(UpperCAmelCase_ ) lowerCamelCase_ = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric lowerCamelCase_ = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing lowerCamelCase_ = "false" if args.num_workers is None: lowerCamelCase_ = multiprocessing.cpu_count() # Use dataset load to feed to accelerate lowerCamelCase_ = Accelerator() set_seed(args.seed , device_specific=UpperCAmelCase_ ) # Load model and tokenizer lowerCamelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt ) lowerCamelCase_ = tokenizer.eos_token lowerCamelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings lowerCamelCase_ = { "do_sample": args.do_sample, "temperature": args.temperature, "max_new_tokens": args.max_new_tokens, "top_p": args.top_p, "top_k": args.top_k, "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , UpperCAmelCase_ , UpperCAmelCase_ )] ), } # Load evaluation dataset and metric lowerCamelCase_ = load_dataset("openai_humaneval" ) lowerCamelCase_ = load_metric("code_eval" ) lowerCamelCase_ = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] ) lowerCamelCase_ = args.n_samples // args.batch_size lowerCamelCase_ = TokenizedDataset(UpperCAmelCase_ , human_eval["test"] , n_copies=UpperCAmelCase_ , n_tasks=UpperCAmelCase_ ) # do not confuse args.batch_size, which is actually the num_return_sequences lowerCamelCase_ = DataLoader(UpperCAmelCase_ , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: lowerCamelCase_ = code_eval_metric.compute(references=[""] , predictions=[[""]] ) except ValueError as exception: print( "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`" " flag to enable code evaluation." ) raise exception lowerCamelCase_ ,lowerCamelCase_ = accelerator.prepare(UpperCAmelCase_ , UpperCAmelCase_ ) lowerCamelCase_ = complete_code( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , n_tasks=UpperCAmelCase_ , batch_size=args.batch_size , **UpperCAmelCase_ , ) if accelerator.is_main_process: lowerCamelCase_ = [] for task in tqdm(range(UpperCAmelCase_ ) ): lowerCamelCase_ = human_eval["test"][task]["test"] lowerCamelCase_ = F'''check({human_eval["test"][task]["entry_point"]})''' references.append("\n" + test_func + "\n" + entry_point ) # Evaluate completions with "code_eval" metric lowerCamelCase_ ,lowerCamelCase_ = code_eval_metric.compute( references=UpperCAmelCase_ , predictions=UpperCAmelCase_ , num_workers=args.num_workers ) print(F'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , "w" ) as fp: json.dump(UpperCAmelCase_ , UpperCAmelCase_ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
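How the post-processing works: generation stops once every sampled sequence contains one of the markers in EOF_STRINGS, and remove_last_block then drops everything from the final marker onward. A self-contained illustration (the completion string is made up):

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]


def remove_last_block(string):
    # Split on the stop markers (kept via the capture group) and drop the last
    # marker plus whatever the model generated after it.
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])


completion = "    return a + b\n\ndef not_wanted():\n    pass"
print(remove_last_block(completion))  # -> "    return a + b\n"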
import json import sys import tempfile import unittest from pathlib import Path import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, AutoConfig, AutoFeatureExtractor, WavaVecaConfig, WavaVecaFeatureExtractor, ) from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _SCREAMING_SNAKE_CASE = get_tests_dir("""fixtures/dummy-config.json""") class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 0 def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Tuple ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : int ): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: UpperCamelCase = WavaVecaConfig() # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ).to_dict() config_dict.pop("""feature_extractor_type""" ) UpperCamelCase = WavaVecaFeatureExtractor(**lowerCamelCase_ ) # save in new folder model_config.save_pretrained(lowerCamelCase_ ) config.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) # make sure private variable is not incorrectly saved UpperCamelCase = json.loads(config.to_json_string() ) self.assertTrue("""_processor_class""" not in dict_as_saved ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """bert-base is not a local folder and is not a valid model identifier""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""bert-base""" ) def lowerCamelCase_ ( self : Dict ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , revision="""aaaaaa""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" with self.assertRaisesRegex( lowerCamelCase_ , """hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.""" , ): UpperCamelCase = AutoFeatureExtractor.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(lowerCamelCase_ ): UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) # Test feature extractor can be reloaded. with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ , trust_remote_code=lowerCamelCase_ ) self.assertEqual(reloaded_feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) def lowerCamelCase_ ( self : List[str] ): """simple docstring""" try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(lowerCamelCase_ ): AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # Now that the config is registered, it can be used as any other config with the auto-API UpperCamelCase = CustomFeatureExtractor.from_pretrained(lowerCamelCase_ ) with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained(lowerCamelCase_ ) UpperCamelCase = AutoFeatureExtractor.from_pretrained(lowerCamelCase_ ) self.assertIsInstance(lowerCamelCase_ , lowerCamelCase_ ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase_ ( self : Any ): """simple docstring""" class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase ): __lowerCAmelCase = True try: AutoConfig.register("""custom""" , lowerCamelCase_ ) AutoFeatureExtractor.register(lowerCamelCase_ , lowerCamelCase_ ) # If remote code is not set, the default is to use local UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote code is disabled, we load the local one. UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(feature_extractor.is_local ) # If remote is enabled, we load from the Hub UpperCamelCase = AutoFeatureExtractor.from_pretrained( """hf-internal-testing/test_dynamic_feature_extractor""" , trust_remote_code=lowerCamelCase_ ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) self.assertTrue(not hasattr(lowerCamelCase_ , """is_local""" ) ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
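The registration pattern exercised by these tests is the same one user code relies on. A minimal sketch; both custom classes are invented here for illustration:

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class MyConfig(PretrainedConfig):  # hypothetical config class
    model_type = "my-model"


class MyFeatureExtractor(FeatureExtractionMixin):  # hypothetical extractor
    pass


AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
# From here on, the auto classes can resolve checkpoints saved with MyFeatureExtractor.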
"""simple docstring""" import copy from ...configuration_utils import PretrainedConfig from ...utils import add_start_docstrings SCREAMING_SNAKE_CASE_ : Dict = r'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. 
See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n' @add_start_docstrings(_lowerCamelCase ) class a ( _lowerCamelCase ): """simple docstring""" UpperCAmelCase = "rag" UpperCAmelCase = True def __init__( self: str , UpperCamelCase: Optional[int]=None , UpperCamelCase: Optional[Any]=True , UpperCamelCase: str=None , UpperCamelCase: Dict=None , UpperCamelCase: Any=None , UpperCamelCase: Any=None , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: int=" / " , UpperCamelCase: str=" // " , UpperCamelCase: Any=5 , UpperCamelCase: Optional[Any]=3_00 , UpperCamelCase: Any=7_68 , UpperCamelCase: Tuple=8 , UpperCamelCase: Union[str, Any]="wiki_dpr" , UpperCamelCase: Union[str, Any]="train" , UpperCamelCase: Any="compressed" , UpperCamelCase: Tuple=None , UpperCamelCase: Optional[Any]=None , UpperCamelCase: int=False , UpperCamelCase: str=False , UpperCamelCase: str=0.0 , UpperCamelCase: int=True , UpperCamelCase: Tuple=False , UpperCamelCase: int=False , UpperCamelCase: Optional[int]=False , UpperCamelCase: Optional[int]=True , UpperCamelCase: List[str]=None , **UpperCamelCase: int , ): """simple docstring""" super().__init__( bos_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , forced_eos_token_id=UpperCamelCase , is_encoder_decoder=UpperCamelCase , prefix=UpperCamelCase , vocab_size=UpperCamelCase , **UpperCamelCase , ) assert ( "question_encoder" in kwargs and "generator" in kwargs ), "Config has to be initialized with question_encoder and generator config" A__ = kwargs.pop("""question_encoder""" ) A__ = question_encoder_config.pop("""model_type""" ) A__ = kwargs.pop("""generator""" ) A__ = decoder_config.pop("""model_type""" ) from ..auto.configuration_auto import AutoConfig A__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) A__ = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) A__ = reduce_loss A__ = label_smoothing A__ = exclude_bos_score A__ = do_marginalize A__ = title_sep A__ = doc_sep A__ = n_docs A__ = max_combined_length A__ = dataset A__ = dataset_split A__ = index_name A__ = retrieval_vector_size A__ = retrieval_batch_size A__ = passages_path A__ = index_path A__ = use_dummy_dataset A__ = output_retrieved A__ = do_deduplication A__ = use_cache if self.forced_eos_token_id is None: A__ = getattr(self.generator , """forced_eos_token_id""" , UpperCamelCase ) @classmethod def UpperCamelCase ( cls: List[str] , UpperCamelCase: PretrainedConfig , UpperCamelCase: PretrainedConfig , **UpperCamelCase: Tuple ): """simple docstring""" return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **UpperCamelCase ) def UpperCamelCase ( self: int ): """simple docstring""" A__ = copy.deepcopy(self.__dict__ ) A__ = self.question_encoder.to_dict() A__ = self.generator.to_dict() A__ = self.__class__.model_type return output
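In practice this composite config is usually assembled from the two sub-configs via the classmethod at the end of the class (from_question_encoder_generator_configs in the public API) rather than from raw kwargs. A sketch, assuming DPR question-encoder and BART generator checkpoints:

from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, index_name="compressed"
)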
"""simple docstring""" import unittest from transformers import DebertaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, ) from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST class a ( _lowerCamelCase ): """simple docstring""" def __init__( self: int , UpperCamelCase: int , UpperCamelCase: Union[str, Any]=13 , UpperCamelCase: List[Any]=7 , UpperCamelCase: Any=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: Optional[Any]=True , UpperCamelCase: str=True , UpperCamelCase: Optional[int]=99 , UpperCamelCase: Optional[Any]=32 , UpperCamelCase: Tuple=5 , UpperCamelCase: Optional[int]=4 , UpperCamelCase: int=37 , UpperCamelCase: str="gelu" , UpperCamelCase: Optional[Any]=0.1 , UpperCamelCase: List[Any]=0.1 , UpperCamelCase: Tuple=5_12 , UpperCamelCase: List[str]=16 , UpperCamelCase: List[str]=2 , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: List[str]=False , UpperCamelCase: int=True , UpperCamelCase: Union[str, Any]="None" , UpperCamelCase: Optional[int]=3 , UpperCamelCase: List[str]=4 , UpperCamelCase: List[str]=None , ): """simple docstring""" A__ = parent A__ = batch_size A__ = seq_length A__ = is_training A__ = use_input_mask A__ = use_token_type_ids A__ = use_labels A__ = vocab_size A__ = hidden_size A__ = num_hidden_layers A__ = num_attention_heads A__ = intermediate_size A__ = hidden_act A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = max_position_embeddings A__ = type_vocab_size A__ = type_sequence_label_size A__ = initializer_range A__ = num_labels A__ = num_choices A__ = relative_attention A__ = position_biased_input A__ = pos_att_type A__ = scope def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) A__ = None if self.use_input_mask: A__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) A__ = None if self.use_token_type_ids: A__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) A__ = None A__ = None A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) A__ = ids_tensor([self.batch_size] , self.num_choices ) A__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase ( self: str ): """simple docstring""" return DebertaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , ) def UpperCamelCase ( self: 
Optional[int] ): """simple docstring""" A__ = self.get_config() A__ = 3_00 return config def UpperCamelCase ( self: List[Any] , UpperCamelCase: str ): """simple docstring""" self.parent.assertListEqual(list(result.loss.size() ) , [] ) def UpperCamelCase ( self: Tuple , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[Any] , UpperCamelCase: List[str] , UpperCamelCase: List[str] , UpperCamelCase: Optional[Any] , UpperCamelCase: Tuple ): """simple docstring""" A__ = DebertaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase )[0] A__ = model(UpperCamelCase , token_type_ids=UpperCamelCase )[0] A__ = model(UpperCamelCase )[0] self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] ) def UpperCamelCase ( self: List[str] , UpperCamelCase: Optional[int] , UpperCamelCase: Optional[Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: Tuple , UpperCamelCase: Tuple , UpperCamelCase: str , UpperCamelCase: Any ): """simple docstring""" A__ = DebertaForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self: Union[str, Any] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: int , UpperCamelCase: Dict , UpperCamelCase: Dict , UpperCamelCase: Tuple , UpperCamelCase: str ): """simple docstring""" A__ = self.num_labels A__ = DebertaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] ) self.check_loss_output(UpperCamelCase ) def UpperCamelCase ( self: Optional[int] , UpperCamelCase: List[Any] , UpperCamelCase: Any , UpperCamelCase: Dict , UpperCamelCase: int , UpperCamelCase: Optional[Any] , UpperCamelCase: str , UpperCamelCase: int ): """simple docstring""" A__ = self.num_labels A__ = DebertaForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self: Tuple , UpperCamelCase: Tuple , UpperCamelCase: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: int , UpperCamelCase: int , UpperCamelCase: Tuple , UpperCamelCase: Any ): """simple docstring""" A__ = DebertaForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.prepare_config_and_inputs() ( ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ( A__ ) , ) = config_and_inputs A__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, 
"""attention_mask""": input_mask} return config, inputs_dict @require_torch class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = ( ( DebertaModel, DebertaForMaskedLM, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaForQuestionAnswering, ) if is_torch_available() else () ) UpperCAmelCase = ( { "feature-extraction": DebertaModel, "fill-mask": DebertaForMaskedLM, "question-answering": DebertaForQuestionAnswering, "text-classification": DebertaForSequenceClassification, "token-classification": DebertaForTokenClassification, "zero-shot": DebertaForSequenceClassification, } if is_torch_available() else {} ) UpperCAmelCase = True UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = DebertaModelTester(self ) A__ = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def UpperCamelCase ( self: int ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_model(*UpperCamelCase ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_sequence_classification(*UpperCamelCase ) def UpperCamelCase ( self: List[Any] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_masked_lm(*UpperCamelCase ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_question_answering(*UpperCamelCase ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_deberta_for_token_classification(*UpperCamelCase ) @slow def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = DebertaModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @require_torch @require_sentencepiece @require_tokenizers class a ( unittest.TestCase ): """simple docstring""" @unittest.skip(reason="""Model not available yet""" ) def UpperCamelCase ( self: Any ): """simple docstring""" pass @slow def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ = DebertaModel.from_pretrained("""microsoft/deberta-base""" ) A__ = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) A__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): A__ = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] # compare the actual values for a slice. A__ = torch.tensor( [[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) , f"""{output[:, 1:4, 1:4]}""" )
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''', [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ], ) def A_ ( _lowerCAmelCase : List[str], _lowerCAmelCase : List[str] ): """simple docstring""" _a = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''', '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''', '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''', '''w''' ) as f: f.write('''{\"default\": {\"dataset_size\": 42}}''' ) _a = DatasetInfosDict.from_directory(_lowerCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''', [ DatasetInfo(), DatasetInfo( description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ), ], ) def A_ ( _lowerCAmelCase : Any, _lowerCAmelCase : DatasetInfo ): """simple docstring""" _a = str(_lowerCAmelCase ) dataset_info.write_to_directory(_lowerCAmelCase ) _a = DatasetInfo.from_directory(_lowerCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(_lowerCAmelCase, '''dataset_info.json''' ) ) def A_ ( ): """simple docstring""" _a = DatasetInfo( description='''foo''', citation='''bar''', homepage='''https://foo.bar''', license='''CC0''', features=Features({'''a''': Value('''int32''' )} ), post_processed={}, supervised_keys=(), task_templates=[], builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train''', '''num_examples''': 42}], download_checksums={}, download_size=13_37, post_processing_size=4_42, dataset_size=12_34, size_in_bytes=13_37 + 4_42 + 12_34, ) _a = dataset_info._to_yaml_dict() assert sorted(_lowerCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str) ) _a = yaml.safe_dump(_lowerCAmelCase ) _a = yaml.safe_load(_lowerCAmelCase ) assert dataset_info_yaml_dict == reloaded def A_ ( ): """simple docstring""" _a = DatasetInfo() _a = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''', [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''', features=Features({'''a''': Value('''int32''' )} ), builder_name='''builder''', config_name='''config''', version='''1.0.0''', splits=[{'''name''': '''train'''}], download_size=42, ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': DatasetInfo(dataset_size=13_37 ), } ), ], ) def A_ ( _lowerCAmelCase : Tuple, _lowerCAmelCase : DatasetInfosDict ): """simple docstring""" _a = str(_lowerCAmelCase ) dataset_infos_dict.write_to_directory(_lowerCAmelCase ) _a = 
DatasetInfosDict.from_directory(_lowerCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _a = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _a = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(_lowerCAmelCase, '''README.md''' ) )
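Stripped of parametrization, the round-trip these tests exercise is two calls. A sketch with the datasets API (the directory name is arbitrary):

from datasets import Features, Value
from datasets.info import DatasetInfo

info = DatasetInfo(description="foo", features=Features({"a": Value("int32")}))
info.write_to_directory("my_dataset_dir")               # writes dataset_info.json
reloaded = DatasetInfo.from_directory("my_dataset_dir")
assert info == reloaded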
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
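A usage sketch for the tool above via the agents API; the task name passed to load_tool is an assumption based on the tool's purpose:

from transformers import load_tool

summarizer = load_tool("summarization")  # assumed task id for this tool
print(summarizer("Very long English text that should be condensed into a short summary..."))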
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available SCREAMING_SNAKE_CASE__:Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__:Any = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys SCREAMING_SNAKE_CASE__:Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_camembert import CamembertTokenizer else: SCREAMING_SNAKE_CASE__:List[Any] = None SCREAMING_SNAKE_CASE__:Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__:Tuple = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} SCREAMING_SNAKE_CASE__:Optional[int] = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", }, """tokenizer_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/tokenizer.json""", }, } SCREAMING_SNAKE_CASE__:List[str] = { """camembert-base""": 512, } SCREAMING_SNAKE_CASE__:str = """▁""" class snake_case__ ( snake_case_ ): _snake_case : List[Any] = VOCAB_FILES_NAMES _snake_case : List[Any] = PRETRAINED_VOCAB_FILES_MAP _snake_case : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _snake_case : Any = ["""input_ids""", """attention_mask"""] _snake_case : str = CamembertTokenizer def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=["<s>NOTUSED", "</s>NOTUSED"] , **lowerCamelCase , ): # Mask token behave like a normal word, i.e. include the space before it __a = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else mask_token super().__init__( lowerCamelCase , tokenizer_file=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , additional_special_tokens=lowerCamelCase , **lowerCamelCase , ) __a = vocab_file __a = False if not self.vocab_file else True def a__ ( self , lowerCamelCase , lowerCamelCase = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __a = [self.cls_token_id] __a = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a__ ( self , lowerCamelCase , lowerCamelCase = None ): __a = [self.sep_token_id] __a = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def a__ ( self , lowerCamelCase , lowerCamelCase = None ): if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(lowerCamelCase ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return __a = os.path.join( lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase ): copyfile(self.vocab_file , lowerCamelCase ) return (out_vocab_file,)
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_cpmant"] = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise a PyTorch model from the JSON config
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    # XOR the repeating key against the ciphertext; bail out on any byte that
    # does not decode to a plausible plaintext character.
    decoded = ""
    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)
    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    # Brute-force every 3-letter lowercase key, keeping decodes made of valid chars.
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    # Project Euler 59: recover the plaintext and return the sum of its ASCII values.
    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")
    ciphertext = [int(number) for number in data.strip().split(",")]
    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break
    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
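The search above works because XOR encryption is its own inverse: re-applying the key restores the plaintext. A tiny check, assuming try_key from this module is in scope (key and plaintext are illustrative):

from itertools import cycle

key = (ord("a"), ord("b"), ord("c"))  # illustrative 3-letter key
plaintext = "hello world"
ciphertext = [ord(ch) ^ k for ch, k in zip(plaintext, cycle(key))]
assert try_key(ciphertext, key) == plaintext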
from math import factorial


def combinations(n: int, k: int) -> int:
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))


if __name__ == "__main__":
    print(
        "The number of five-card hands possible from a standard",
        f"fifty-two card deck is: {combinations(52, 5)}\n",
    )
    print(
        "If a class of 40 students must be arranged into groups of",
        f"4 for group projects, there are {combinations(40, 4)} ways",
        "to arrange them.\n",
    )
    print(
        "If 10 teams are competing in a Formula One race, there",
        f"are {combinations(10, 3)} ways that first, second and",
        "third place can be awarded.",
    )
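The three figures printed above follow directly from n! / (k! (n - k)!), and are easy to verify with the function itself:

assert combinations(52, 5) == 2_598_960
assert combinations(40, 4) == 91_390
assert combinations(10, 3) == 120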
import warnings

from ...utils import logging
from .image_processing_dpt import DPTImageProcessor


logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run UpperCAmelCase_ = True except (ImportError, AttributeError): UpperCAmelCase_ = object def lowerCamelCase__ ( *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Any ) -> int: '''simple docstring''' pass UpperCAmelCase_ = False UpperCAmelCase_ = logging.get_logger("""transformers-cli/serving""") def lowerCamelCase__ ( UpperCamelCase__ : Namespace ) -> Dict: '''simple docstring''' _snake_case = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(snake_case__ , args.host , args.port , args.workers ) class UpperCamelCase_ ( __lowerCAmelCase ): lowerCAmelCase_ = 42 class UpperCamelCase_ ( __lowerCAmelCase ): lowerCAmelCase_ = 42 lowerCAmelCase_ = 42 class UpperCamelCase_ ( __lowerCAmelCase ): lowerCAmelCase_ = 42 class UpperCamelCase_ ( __lowerCAmelCase ): lowerCAmelCase_ = 42 class UpperCamelCase_ ( __lowerCAmelCase ): @staticmethod def lowerCAmelCase ( lowerCAmelCase_ ) -> Any: _snake_case = parser.add_parser( 'serve' , help='CLI tool to run inference requests through REST and GraphQL endpoints.' ) serve_parser.add_argument( '--task' , type=lowerCAmelCase_ , choices=get_supported_tasks() , help='The task to run the pipeline on' , ) serve_parser.add_argument('--host' , type=lowerCAmelCase_ , default='localhost' , help='Interface the server will listen on.' ) serve_parser.add_argument('--port' , type=lowerCAmelCase_ , default=8888 , help='Port the serving will listen to.' ) serve_parser.add_argument('--workers' , type=lowerCAmelCase_ , default=1 , help='Number of http workers' ) serve_parser.add_argument('--model' , type=lowerCAmelCase_ , help='Model\'s name or path to stored model.' ) serve_parser.add_argument('--config' , type=lowerCAmelCase_ , help='Model\'s config name or path to stored model.' ) serve_parser.add_argument('--tokenizer' , type=lowerCAmelCase_ , help='Tokenizer name to use.' ) serve_parser.add_argument( '--device' , type=lowerCAmelCase_ , default=-1 , help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)' , ) serve_parser.set_defaults(func=lowerCAmelCase_ ) def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = pipeline _snake_case = host _snake_case = port _snake_case = workers if not _serve_dependencies_installed: raise RuntimeError( 'Using serve command requires FastAPI and uvicorn. ' 'Please install transformers with [serving]: pip install \"transformers[serving]\".' 'Or install FastAPI and uvicorn separately.' 
) else: logger.info(F'''Serving model over {host}:{port}''' ) _snake_case = FastAPI( routes=[ APIRoute( '/' , self.model_info , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['GET'] , ), APIRoute( '/tokenize' , self.tokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ), APIRoute( '/detokenize' , self.detokenize , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ), APIRoute( '/forward' , self.forward , response_model=lowerCAmelCase_ , response_class=lowerCAmelCase_ , methods=['POST'] , ), ] , timeout=600 , ) def lowerCAmelCase ( self ) -> str: run(self._app , host=self.host , port=self.port , workers=self.workers ) def lowerCAmelCase ( self ) -> Union[str, Any]: return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def lowerCAmelCase ( self , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> List[Any]: try: _snake_case = self._pipeline.tokenizer.tokenize(lowerCAmelCase_ ) if return_ids: _snake_case = self._pipeline.tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) return ServeTokenizeResult(tokens=lowerCAmelCase_ , tokens_ids=lowerCAmelCase_ ) else: return ServeTokenizeResult(tokens=lowerCAmelCase_ ) except Exception as e: raise HTTPException(status_code=500 , detail={'model': '', 'error': str(lowerCAmelCase_ )} ) def lowerCAmelCase ( self , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , lowerCAmelCase_ = Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) , ) -> Dict: try: _snake_case = self._pipeline.tokenizer.decode(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) return ServeDeTokenizeResult(model='' , text=lowerCAmelCase_ ) except Exception as e: raise HTTPException(status_code=500 , detail={'model': '', 'error': str(lowerCAmelCase_ )} ) async def lowerCAmelCase ( self , lowerCAmelCase_=Body(lowerCAmelCase_ , embed=lowerCAmelCase_ ) ) -> Any: # Check we don't have empty string if len(lowerCAmelCase_ ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model _snake_case = self._pipeline(lowerCAmelCase_ ) return ServeForwardResult(output=lowerCAmelCase_ ) except Exception as e: raise HTTPException(500 , {'error': str(lowerCAmelCase_ )} )
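Once the server is running, the four routes defined above answer plain HTTP. A hedged sketch against the default host and port; the JSON field names mirror the Body(..., embed=True) parameters, which are obfuscated in the source, so treat them as assumptions:

import requests

base = "http://localhost:8888"
print(requests.get(f"{base}/").json())                                         # model info
print(requests.post(f"{base}/tokenize", json={"text_input": "hello"}).json())  # assumed field name
print(requests.post(f"{base}/forward", json={"inputs": "hello"}).json())       # assumed field name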
365
import inspect import unittest from typing import List import numpy as np from transformers import EfficientFormerConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerModel, ) from transformers.models.efficientformer.modeling_tf_efficientformer import ( TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_vision_available(): from PIL import Image from transformers import EfficientFormerImageProcessor class UpperCamelCase_ : def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ = 13 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = 3 , lowerCAmelCase_ = True , lowerCAmelCase_ = True , lowerCAmelCase_ = 128 , lowerCAmelCase_=[16, 32, 64, 128] , lowerCAmelCase_ = 7 , lowerCAmelCase_ = 4 , lowerCAmelCase_ = 37 , lowerCAmelCase_ = "gelu" , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 0.1 , lowerCAmelCase_ = 10 , lowerCAmelCase_ = 0.02 , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 1 , lowerCAmelCase_ = 128 , lowerCAmelCase_ = [2, 2, 2, 2] , lowerCAmelCase_ = 2 , lowerCAmelCase_ = 2 , ) -> Dict: _snake_case = parent _snake_case = batch_size _snake_case = image_size _snake_case = patch_size _snake_case = num_channels _snake_case = is_training _snake_case = use_labels _snake_case = hidden_size _snake_case = num_hidden_layers _snake_case = num_attention_heads _snake_case = intermediate_size _snake_case = hidden_act _snake_case = hidden_dropout_prob _snake_case = attention_probs_dropout_prob _snake_case = type_sequence_label_size _snake_case = initializer_range _snake_case = encoder_stride _snake_case = num_attention_outputs _snake_case = embed_dim _snake_case = embed_dim + 1 _snake_case = resolution _snake_case = depths _snake_case = hidden_sizes _snake_case = dim _snake_case = mlp_expansion_ratio def lowerCAmelCase ( self ) -> Optional[Any]: _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _snake_case = None if self.use_labels: _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _snake_case = self.get_config() return config, pixel_values, labels def lowerCAmelCase ( self ) -> Tuple: return EfficientFormerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]: _snake_case = TFEfficientFormerModel(config=lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , training=lowerCAmelCase_ ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any: _snake_case = self.type_sequence_label_size _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ , training=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images _snake_case = 1 _snake_case = TFEfficientFormerForImageClassification(lowerCAmelCase_ ) _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) _snake_case = model(lowerCAmelCase_ , labels=lowerCAmelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase ( self ) -> List[str]: _snake_case = self.prepare_config_and_inputs() _snake_case , _snake_case , _snake_case = config_and_inputs _snake_case = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase_ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): lowerCAmelCase_ = ( ( TFEfficientFormerModel, TFEfficientFormerForImageClassificationWithTeacher, TFEfficientFormerForImageClassification, ) if is_tf_available() else () ) lowerCAmelCase_ = ( { '''feature-extraction''': TFEfficientFormerModel, '''image-classification''': ( TFEfficientFormerForImageClassification, TFEfficientFormerForImageClassificationWithTeacher, ), } if is_tf_available() else {} ) lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False lowerCAmelCase_ = False def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerModelTester(self ) _snake_case = ConfigTester( self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def lowerCAmelCase ( self ) -> str: self.config_tester.run_common_tests() @unittest.skip(reason='EfficientFormer does not use inputs_embeds' ) def lowerCAmelCase ( self ) -> int: pass @unittest.skip(reason='EfficientFormer does not support input and output embeddings' ) def lowerCAmelCase ( self ) -> Optional[Any]: pass def lowerCAmelCase ( self ) -> str: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = model_class(lowerCAmelCase_ ) _snake_case = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _snake_case = [*signature.parameters.keys()] _snake_case = ['pixel_values'] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states _snake_case = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) if hasattr(self.model_tester , 'encoder_seq_length' ): _snake_case = self.model_tester.encoder_seq_length if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1: _snake_case = seq_length * self.model_tester.chunk_length else: _snake_case = self.model_tester.seq_length 
self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) if config.is_encoder_decoder: _snake_case = outputs.decoder_hidden_states self.assertIsInstance(lowerCAmelCase_ , (list, tuple) ) self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowerCAmelCase_ ) self.assertListEqual( list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , ) _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _snake_case = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> List[Any]: _snake_case = super()._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ , return_labels=lowerCAmelCase_ ) if return_labels: if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' ) def lowerCAmelCase ( self ) -> Dict: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[Any]: _snake_case = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ ) @slow def lowerCAmelCase ( self ) -> str: for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _snake_case = TFEfficientFormerModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def lowerCAmelCase ( self ) -> List[str]: _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() _snake_case = True _snake_case = getattr(self.model_tester , 'seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'key_length' , lowerCAmelCase_ ) _snake_case = getattr(self.model_tester , 'chunk_length' , lowerCAmelCase_ ) if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ): _snake_case = encoder_seq_length * self.model_tester.num_hashes for model_class in self.all_model_classes: _snake_case = True _snake_case = False _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _snake_case = True _snake_case = model_class(lowerCAmelCase_ ) _snake_case = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) , training=lowerCAmelCase_ ) _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else
outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_attention_outputs ) if chunk_length is not None: self.assertListEqual( list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , ) else: self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , ) def lowerCAmelCase ( self ) -> Dict: # We use a simplified version of this test for EfficientFormer because it requires training=False # and Keras refuses to let us force that during functional construction _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # Prepare our model _snake_case = model_class(lowerCAmelCase_ ) # These are maximally general inputs for the model, with multiple None dimensions # Hopefully this will catch any conditionals that fail for flexible shapes _snake_case = { key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowerCAmelCase_ ) for key, val in model.input_signature.items() if key in model.dummy_inputs } _snake_case = model(lowerCAmelCase_ ) self.assertTrue(outputs_dict is not None ) def lowerCamelCase__ ( ) -> List[str]: '''simple docstring''' _snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase_ ( unittest.TestCase ): @cached_property def lowerCAmelCase ( self ) -> Dict: return ( EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' ) if is_vision_available() else None ) @slow def lowerCAmelCase ( self ) -> Union[str, Any]: _snake_case = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.05_55, 0.48_25, -0.08_52] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) ) @slow def lowerCAmelCase ( self ) -> str: _snake_case = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained( 'snap-research/efficientformer-l1-300' ) _snake_case = self.default_image_processor _snake_case = prepare_img() _snake_case = image_processor(images=lowerCAmelCase_ , return_tensors='tf' ) # forward pass _snake_case = model(**lowerCAmelCase_ , training=lowerCAmelCase_ ) # verify the logits _snake_case = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _snake_case = tf.constant([-0.13_12, 0.43_53, -1.04_99] ) self.assertTrue(np.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
295
0
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
313
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class a_ ( a__ ): """simple docstring""" __SCREAMING_SNAKE_CASE : Optional[Any] = (EulerDiscreteScheduler,) __SCREAMING_SNAKE_CASE : Optional[int] = 10 def __lowerCAmelCase ( self , **_lowerCamelCase ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0_0_0_1, '''beta_end''': 0.0_2, '''beta_schedule''': '''linear''', } config.update(**_lowerCamelCase ) return config def __lowerCAmelCase ( self ) ->Tuple: for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Any: for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ): self.check_over_configs(beta_start=_lowerCamelCase , beta_end=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->int: for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->Optional[Any]: for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=_lowerCamelCase ) def __lowerCAmelCase ( self ) ->List[Any]: SCREAMING_SNAKE_CASE : str = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : int = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = self.dummy_model() SCREAMING_SNAKE_CASE : int = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : Any = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[str] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->List[str]: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_scheduler_config(prediction_type='''v_prediction''' ) SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps ) SCREAMING_SNAKE_CASE : Tuple = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : int = self.dummy_model() SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma SCREAMING_SNAKE_CASE : List[str] = sample.to(_lowerCamelCase ) for i, t in enumerate(scheduler.timesteps ): SCREAMING_SNAKE_CASE : str = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = output.prev_sample SCREAMING_SNAKE_CASE : str = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(_lowerCamelCase ) ) assert 
abs(result_sum.item() - 0.0_0_0_2 ) < 1e-2 assert abs(result_mean.item() - 2.2676e-06 ) < 1e-3 def __lowerCAmelCase ( self ) ->Tuple: SCREAMING_SNAKE_CASE : Optional[int] = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Dict = self.get_scheduler_config() SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[Any] = self.dummy_model() SCREAMING_SNAKE_CASE : str = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : Optional[Any] = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : Dict = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : List[Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[Any] = output.prev_sample SCREAMING_SNAKE_CASE : List[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : str = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_0.0_8_0_7 ) < 1e-2 assert abs(result_mean.item() - 0.0_1_3_1 ) < 1e-3 def __lowerCAmelCase ( self ) ->Optional[int]: SCREAMING_SNAKE_CASE : Dict = self.scheduler_classes[0] SCREAMING_SNAKE_CASE : Optional[Any] = self.get_scheduler_config() SCREAMING_SNAKE_CASE : List[Any] = scheduler_class(**_lowerCamelCase , use_karras_sigmas=_lowerCamelCase ) scheduler.set_timesteps(self.num_inference_steps , device=_lowerCamelCase ) SCREAMING_SNAKE_CASE : Optional[int] = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model() SCREAMING_SNAKE_CASE : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() SCREAMING_SNAKE_CASE : int = sample.to(_lowerCamelCase ) for t in scheduler.timesteps: SCREAMING_SNAKE_CASE : List[Any] = scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Union[str, Any] = model(_lowerCamelCase , _lowerCamelCase ) SCREAMING_SNAKE_CASE : Dict = scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , generator=_lowerCamelCase ) SCREAMING_SNAKE_CASE : int = output.prev_sample SCREAMING_SNAKE_CASE : Optional[Any] = torch.sum(torch.abs(_lowerCamelCase ) ) SCREAMING_SNAKE_CASE : Any = torch.mean(torch.abs(_lowerCamelCase ) ) assert abs(result_sum.item() - 1_2_4.5_2_2_9_9_4_9_9_5_1_1_7_1_9 ) < 1e-2 assert abs(result_mean.item() - 0.1_6_2_1_3_9_3_2_6_3_3_3_9_9_9_6_3 ) < 1e-3
313
1
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
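A quick worked example of the function above, traced by hand: over eight leaves, the maximizer picks the larger of the two subtree minima, min(max(3, 5), max(2, 9)) = 5 on the left and min(max(12, 5), max(23, 23)) = 12 on the right, so the root value is 12.

import math

scores = [3, 5, 2, 9, 12, 5, 23, 23]
height = math.log(len(scores), 2)  # complete binary tree of height 3
print(minimax(0, 0, True, scores, height))  # 12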
141
from argparse import ArgumentParser

from . import BaseTransformersCLICommand


def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
141
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _UpperCamelCase : Optional[int] = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : str = ['YolosFeatureExtractor'] _UpperCamelCase : List[Any] = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCamelCase : Dict = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys _UpperCamelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
220
"""simple docstring""" # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar _UpperCamelCase : Optional[Any] = TypeVar('T') class a ( Generic[T] ): def __init__( self , _lowerCamelCase = True ): lowercase = {} # dictionary of lists lowercase = directed def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ): if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(_lowerCamelCase ) self.adj_list[destination_vertex].append(_lowerCamelCase ) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(_lowerCamelCase ) lowercase = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(_lowerCamelCase ) lowercase = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: lowercase = [destination_vertex] lowercase = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(_lowerCamelCase ) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(_lowerCamelCase ) lowercase = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: lowercase = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: lowercase = [destination_vertex] lowercase = [] return self def __repr__( self ): return pformat(self.adj_list )
220
1
from __future__ import annotations

from collections.abc import Iterator


class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
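A short usage sketch for the classes above: the iterator yields the sum of all node values in the tree.

tree = Node(10)
tree.left = Node(5)
tree.right = Node(-3)
print(next(iter(BinaryTreeNodeSum(tree))))  # 10 + 5 + (-3) = 12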
355
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count: defaultdict = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f"{solution() = }")
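For intuition: the smallest hollow lamina is a 3x3 square with a 1x1 hole, and its tile count is what the inner loop tallies; a minimal sanity call follows (the printed value is not asserted here).

outer_width, hole_width = 3, 1
print(outer_width**2 - hole_width**2)  # 8 tiles in the smallest lamina
print(solution(100, 10))  # laminae counts restricted to at most 100 tiles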
109
0
"""simple docstring""" from __future__ import annotations import pandas as pd def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [0] * no_of_processes __SCREAMING_SNAKE_CASE = [0] * no_of_processes # Copy the burst time into remaining_time[] for i in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = burst_time[i] __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 9_9999_9999 __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = False # Process until all processes are completed while complete != no_of_processes: for j in range(lowerCAmelCase_ ): if arrival_time[j] <= increment_time and remaining_time[j] > 0: if remaining_time[j] < minm: __SCREAMING_SNAKE_CASE = remaining_time[j] __SCREAMING_SNAKE_CASE = j __SCREAMING_SNAKE_CASE = True if not check: increment_time += 1 continue remaining_time[short] -= 1 __SCREAMING_SNAKE_CASE = remaining_time[short] if minm == 0: __SCREAMING_SNAKE_CASE = 9_9999_9999 if remaining_time[short] == 0: complete += 1 __SCREAMING_SNAKE_CASE = False # Find finish time of current process __SCREAMING_SNAKE_CASE = increment_time + 1 # Calculate waiting time __SCREAMING_SNAKE_CASE = finish_time - arrival_time[short] __SCREAMING_SNAKE_CASE = finar - burst_time[short] if waiting_time[short] < 0: __SCREAMING_SNAKE_CASE = 0 # Increment time increment_time += 1 return waiting_time def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = [0] * no_of_processes for i in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = burst_time[i] + waiting_time[i] return turn_around_time def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ): '''simple docstring''' __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = 0 for i in range(lowerCAmelCase_ ): __SCREAMING_SNAKE_CASE = total_waiting_time + waiting_time[i] __SCREAMING_SNAKE_CASE = total_turn_around_time + turn_around_time[i] print(f"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" ) print("Average turn around time =" , total_turn_around_time / no_of_processes ) if __name__ == "__main__": print('''Enter how many process you want to analyze''') a__ : Optional[Any] = int(input()) a__ : Optional[int] = [0] * no_of_processes a__ : int = [0] * no_of_processes a__ : List[Any] = list(range(1, no_of_processes + 1)) for i in range(no_of_processes): print('''Enter the arrival time and burst time for process:--''' + str(i + 1)) a__ , a__ : Tuple = map(int, input().split()) a__ : int = calculate_waitingtime(arrival_time, burst_time, no_of_processes) a__ : Dict = burst_time a__ : Any = no_of_processes a__ : Optional[int] = waiting_time a__ : Union[str, Any] = calculate_turnaroundtime(bt, n, wt) calculate_average_times(waiting_time, turn_around_time, no_of_processes) a__ : str = pd.DataFrame( list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)), columns=[ '''Process''', '''BurstTime''', '''ArrivalTime''', '''WaitingTime''', '''TurnAroundTime''', ], ) # Printing the dataFrame pd.set_option('''display.max_rows''', fcfs.shape[0] + 1) print(fcfs)
54
"""simple docstring""" from __future__ import annotations def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None ): '''simple docstring''' if start is None: __SCREAMING_SNAKE_CASE = 0 if end is None: __SCREAMING_SNAKE_CASE = len(lowerCAmelCase_ ) - 1 if start >= end: return __SCREAMING_SNAKE_CASE = (start + end) // 2 slowsort(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) slowsort(lowerCAmelCase_ , mid + 1 , lowerCAmelCase_ ) if sequence[end] < sequence[mid]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = sequence[mid], sequence[end] slowsort(lowerCAmelCase_ , lowerCAmelCase_ , end - 1 ) if __name__ == "__main__": from doctest import testmod testmod()
54
1
def reverse_long_words(sentence: str) -> str:
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
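One more round-trip example: both words are longer than four characters, so both are reversed.

print(reverse_long_words("stressed desserts"))  # desserts stressed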
96
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
96
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]


if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
207
from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS A__ : Tuple = logging.get_logger(__name__) A__ : int = { 'linear': get_linear_schedule_with_warmup, 'cosine': get_cosine_schedule_with_warmup, 'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup, 'polynomial': get_polynomial_decay_schedule_with_warmup, 'constant': get_constant_schedule, 'constant_w_warmup': get_constant_schedule_with_warmup, } class _UpperCAmelCase ( A__ ): """simple docstring""" def __init__( self : Optional[int], lowerCamelCase : int=None, lowerCamelCase : int=None, *lowerCamelCase : List[Any], **lowerCamelCase : Any ): '''simple docstring''' super().__init__(*lowerCamelCase, **lowerCamelCase ) if config is None: assert isinstance(self.model, lowerCamelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F""" {self.model.__class__}""" ) lowercase__ = self.model.config else: lowercase__ = config lowercase__ = data_args lowercase__ = self.config.tgt_vocab_size if isinstance(self.config, lowerCamelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"""The `config.pad_token_id` is `None`. 
Using `config.eos_token_id` = {self.config.eos_token_id} for""" ''' padding..''' ) if self.args.label_smoothing == 0: lowercase__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowercase__ = label_smoothed_nll_loss def lowercase__ ( self : List[Any], lowerCamelCase : int ): '''simple docstring''' if self.optimizer is None: lowercase__ = ['''bias''', '''LayerNorm.weight'''] lowercase__ = [ { '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], '''weight_decay''': self.args.weight_decay, }, { '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0, }, ] lowercase__ = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowercase__ = Adafactor lowercase__ = {'''scale_parameter''': False, '''relative_step''': False} else: lowercase__ = AdamW lowercase__ = { '''betas''': (self.args.adam_betaa, self.args.adam_betaa), '''eps''': self.args.adam_epsilon, } lowercase__ = self.args.learning_rate if self.sharded_ddp: lowercase__ = OSS( params=lowerCamelCase, optim=lowerCamelCase, **lowerCamelCase, ) else: lowercase__ = optimizer_cls(lowerCamelCase, **lowerCamelCase ) if self.lr_scheduler is None: lowercase__ = self._get_lr_scheduler(lowerCamelCase ) else: # ignoring --lr_scheduler logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' ) def lowercase__ ( self : List[str], lowerCamelCase : Optional[int] ): '''simple docstring''' lowercase__ = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowercase__ = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowercase__ = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps ) else: lowercase__ = schedule_func( self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=lowerCamelCase ) return scheduler def lowercase__ ( self : List[Any] ): '''simple docstring''' if isinstance(self.train_dataset, torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : str, lowerCamelCase : Union[str, Any] ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowercase__ = model(**lowerCamelCase, use_cache=lowerCamelCase )[0] lowercase__ = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) ) else: # compute usual loss via models lowercase__ , lowercase__ = model(**lowerCamelCase, labels=lowerCamelCase, use_cache=lowerCamelCase )[:2] else: # compute label smoothed loss lowercase__ = model(**lowerCamelCase, use_cache=lowerCamelCase )[0] lowercase__ = torch.nn.functional.log_softmax(lowerCamelCase, dim=-1 ) lowercase__ , lowercase__ = self.loss_fn(lowerCamelCase, lowerCamelCase, self.args.label_smoothing, ignore_index=self.config.pad_token_id ) return loss, logits def lowercase__ ( self : List[str], lowerCamelCase : Optional[Any], 
lowerCamelCase : Tuple ): '''simple docstring''' lowercase__ = inputs.pop('''labels''' ) lowercase__ , lowercase__ = self._compute_loss(lowerCamelCase, lowerCamelCase, lowerCamelCase ) return loss def lowercase__ ( self : str, lowerCamelCase : nn.Module, lowerCamelCase : Dict[str, Union[torch.Tensor, Any]], lowerCamelCase : bool, lowerCamelCase : Optional[List[str]] = None, ): '''simple docstring''' lowercase__ = self._prepare_inputs(lowerCamelCase ) lowercase__ = { '''max_length''': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowercase__ = self.model.generate( inputs['''input_ids'''], attention_mask=inputs['''attention_mask'''], **lowerCamelCase, ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowercase__ = self._pad_tensors_to_max_len(lowerCamelCase, gen_kwargs['''max_length'''] ) lowercase__ = inputs.pop('''labels''' ) with torch.no_grad(): # compute loss on predict data lowercase__ , lowercase__ = self._compute_loss(lowerCamelCase, lowerCamelCase, lowerCamelCase ) lowercase__ = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowercase__ = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowercase__ = self._pad_tensors_to_max_len(lowerCamelCase, gen_kwargs['''max_length'''] ) return (loss, logits, labels) def lowercase__ ( self : List[Any], lowerCamelCase : str, lowerCamelCase : Any ): '''simple docstring''' # If PAD token is not defined at least EOS token has to be defined lowercase__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be''' F""" padded to `max_length`={max_length}""" ) lowercase__ = pad_token_id * torch.ones( (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device ) lowercase__ = tensor return padded_tensor
207
1
"""simple docstring""" def _lowerCAmelCase ( UpperCAmelCase__ : int = 1_0**1_2 ) ->int: A__ : int = 1 A__ : Union[str, Any] = 0 A__ : List[str] = 1 A__ : Any = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(F'{solution() = }')
355
"""simple docstring""" from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING A_ = logging.get_logger(__name__) A_ = Dict[str, Any] A_ = List[Prediction] @add_end_docstrings(UpperCamelCase ) class __SCREAMING_SNAKE_CASE ( UpperCamelCase ): def __init__( self : str , *snake_case : Tuple , **snake_case : Tuple ): '''simple docstring''' super().__init__(*snake_case , **snake_case ) if self.framework == "tf": raise ValueError(F'The {self.__class__} is only available in PyTorch.' ) requires_backends(self , """vision""" ) self.check_model_type( dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) ) def _UpperCamelCase ( self : List[Any] , **snake_case : Optional[int] ): '''simple docstring''' A__ : Dict = {} if "threshold" in kwargs: A__ : int = kwargs["""threshold"""] return {}, {}, postprocess_kwargs def __call__( self : Tuple , *snake_case : Union[str, Any] , **snake_case : Union[str, Any] ): '''simple docstring''' return super().__call__(*snake_case , **snake_case ) def _UpperCamelCase ( self : str , snake_case : int ): '''simple docstring''' A__ : List[str] = load_image(snake_case ) A__ : int = torch.IntTensor([[image.height, image.width]] ) A__ : Union[str, Any] = self.image_processor(images=[image] , return_tensors="""pt""" ) if self.tokenizer is not None: A__ : str = self.tokenizer(text=inputs["""words"""] , boxes=inputs["""boxes"""] , return_tensors="""pt""" ) A__ : List[str] = target_size return inputs def _UpperCamelCase ( self : Optional[int] , snake_case : List[Any] ): '''simple docstring''' A__ : str = model_inputs.pop("""target_size""" ) A__ : Dict = self.model(**snake_case ) A__ : Optional[Any] = outputs.__class__({"""target_size""": target_size, **outputs} ) if self.tokenizer is not None: A__ : str = model_inputs["""bbox"""] return model_outputs def _UpperCamelCase ( self : Tuple , snake_case : Optional[int] , snake_case : int=0.9 ): '''simple docstring''' A__ : Any = model_outputs["""target_size"""] if self.tokenizer is not None: # This is a LayoutLMForTokenClassification variant. # The OCR got the boxes and the model classified the words. 
A__ , A__ : Tuple = target_size[0].tolist() def unnormalize(snake_case : Optional[int] ): return self._get_bounding_box( torch.Tensor( [ (width * bbox[0] / 1000), (height * bbox[1] / 1000), (width * bbox[2] / 1000), (height * bbox[3] / 1000), ] ) ) A__ , A__ : Optional[int] = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 ) A__ : Optional[Any] = [self.model.config.idalabel[prediction] for prediction in classes.tolist()] A__ : List[str] = [unnormalize(snake_case ) for bbox in model_outputs["""bbox"""].squeeze(0 )] A__ : Tuple = ["""score""", """label""", """box"""] A__ : Any = [dict(zip(snake_case , snake_case ) ) for vals in zip(scores.tolist() , snake_case , snake_case ) if vals[0] > threshold] else: # This is a regular ForObjectDetectionModel A__ : Union[str, Any] = self.image_processor.post_process_object_detection(snake_case , snake_case , snake_case ) A__ : str = raw_annotations[0] A__ : str = raw_annotation["""scores"""] A__ : List[Any] = raw_annotation["""labels"""] A__ : int = raw_annotation["""boxes"""] A__ : str = scores.tolist() A__ : Any = [self.model.config.idalabel[label.item()] for label in labels] A__ : int = [self._get_bounding_box(snake_case ) for box in boxes] # {"scores": [...], ...} --> [{"score":x, ...}, ...] A__ : str = ["""score""", """label""", """box"""] A__ : Dict = [ dict(zip(snake_case , snake_case ) ) for vals in zip(raw_annotation["""scores"""] , raw_annotation["""labels"""] , raw_annotation["""boxes"""] ) ] return annotation def _UpperCamelCase ( self : Union[str, Any] , snake_case : "torch.Tensor" ): '''simple docstring''' if self.framework != "pt": raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" ) A__ , A__ , A__ , A__ : Any = box.int().tolist() A__ : Any = { """xmin""": xmin, """ymin""": ymin, """xmax""": xmax, """ymax""": ymax, } return bbox
296
0
import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score

            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds

            if not find_max:
                change = change * -1  # in case we are finding minimum

            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via simulated annealing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: "
        f"{local_max.score()}"
    )
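For intuition on the Metropolis acceptance rule used above: a worsening move of size |change| is accepted with probability e^(change/T), so bad moves are common early and rare as the temperature decays.

import math

# A move 5 units worse is accepted ~95% of the time at T=100,
# ~61% at T=10, and ~0.7% at T=1, which is what lets the search
# escape local optima before the schedule cools.
for temp in (100, 10, 1):
    print(temp, math.e ** (-5 / temp))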
67
'''simple docstring''' import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class a__ : def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Tuple=13 , a : Optional[Any]=7 , a : List[Any]=True , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=99 , a : Any=32 , a : int=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Optional[Any]="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Optional[int]=5_12 , a : int=16 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Any=3 , a : Dict=4 , a : Any=None , ): """simple docstring""" __lowerCamelCase = parent __lowerCamelCase = batch_size __lowerCamelCase = seq_length __lowerCamelCase = is_training __lowerCamelCase = use_token_type_ids __lowerCamelCase = use_labels __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_act __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = type_sequence_label_size __lowerCamelCase = initializer_range __lowerCamelCase = num_labels __lowerCamelCase = num_choices __lowerCamelCase = scope __lowerCamelCase = self.vocab_size - 1 def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase = None if self.use_token_type_ids: __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase = None __lowerCamelCase = None __lowerCamelCase = None if self.use_labels: __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase = OpenAIGPTConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , ) __lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 ) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : List[Any] , *a : Union[str, Any] ): """simple docstring""" __lowerCamelCase = OpenAIGPTModel(config=a ) model.to(a ) model.eval() __lowerCamelCase = model(a , token_type_ids=a , head_mask=a ) __lowerCamelCase = model(a , token_type_ids=a ) __lowerCamelCase = model(a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Tuple , *a : Union[str, Any] ): """simple 
docstring""" __lowerCamelCase = OpenAIGPTLMHeadModel(a ) model.to(a ) model.eval() __lowerCamelCase = model(a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , *a : Optional[Any] ): """simple docstring""" __lowerCamelCase = OpenAIGPTDoubleHeadsModel(a ) model.to(a ) model.eval() __lowerCamelCase = model(a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int , a : Dict , a : Optional[Any] , a : str , *a : int ): """simple docstring""" __lowerCamelCase = self.num_labels __lowerCamelCase = OpenAIGPTForSequenceClassification(a ) model.to(a ) model.eval() __lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase = model(a , token_type_ids=a , labels=a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) = config_and_inputs __lowerCamelCase = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask, } return config, inputs_dict @require_torch class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): lowerCamelCase : List[str] =( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) lowerCamelCase : str =( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly lowerCamelCase : Optional[int] =( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : Optional[int] , a : int , a : str , a : Any ): """simple docstring""" if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests": # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers. # `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a # tiny config could not be created. 
return True return False def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int , a : Optional[int] , a : str=False ): """simple docstring""" __lowerCamelCase = super()._prepare_for_class(a , a , return_labels=a ) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": __lowerCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a , ) __lowerCamelCase = inputs_dict['''labels'''] __lowerCamelCase = inputs_dict['''labels'''] __lowerCamelCase = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a , ) __lowerCamelCase = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a ) return inputs_dict def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ): """simple docstring""" __lowerCamelCase = OpenAIGPTModelTester(self ) __lowerCamelCase = ConfigTester(self , config_class=a , n_embd=37 ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" self.config_tester.run_common_tests() def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ): """simple docstring""" __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*a ) def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*a ) def SCREAMING_SNAKE_CASE__ ( self : List[str] ): """simple docstring""" __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*a ) def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" __lowerCamelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a ) @slow def SCREAMING_SNAKE_CASE__ ( self : Any ): """simple docstring""" for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCamelCase = OpenAIGPTModel.from_pretrained(a ) self.assertIsNotNone(a ) @require_torch class a__ ( unittest.TestCase ): @slow def SCREAMING_SNAKE_CASE__ ( self : str ): """simple docstring""" __lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' ) model.to(a ) __lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=a ) # the president is __lowerCamelCase = [ 4_81, 47_35, 5_44, 2_46, 9_63, 8_70, 7_62, 2_39, 2_44, 4_04_77, 2_44, 2_49, 7_19, 8_81, 4_87, 5_44, 2_40, 2_44, 6_03, 4_81, ] # the president is a very good man. " \n " i\'m sure he is, " said the __lowerCamelCase = model.generate(a , do_sample=a ) self.assertListEqual(output_ids[0].tolist() , a )
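A minimal sketch of the greedy-generation path that the slow integration test above exercises; the checkpoint name and prompt mirror the test, and downloading from the Hugging Face Hub is assumed.

import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.eval()

inputs = tokenizer("the president is", return_tensors="pt")
with torch.no_grad():
    # greedy decoding, matching do_sample=False in the test above
    output_ids = model.generate(inputs.input_ids, do_sample=False, max_length=20)
print(tokenizer.decode(output_ids[0]))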
'''simple docstring''' def _lowerCAmelCase ( lowercase = 100 ) -> int: __lowerCAmelCase = 0 __lowerCAmelCase = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(f'{solution() = }')
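The loop above runs in O(n); the same answer follows in O(1) from the closed forms of the two sums, as in this short sketch (the helper name is illustrative).

def solution_closed_form(n: int = 100) -> int:
    # sum of the first n integers: n(n + 1) / 2
    sum_of_ints = n * (n + 1) // 2
    # sum of the first n squares: n(n + 1)(2n + 1) / 6
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert solution_closed_form(10) == 2640  # 55**2 - 385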
'''simple docstring''' import warnings from .generation import TFGenerationMixin class _UpperCAmelCase ( lowerCAmelCase_ ): # warning at import time warnings.warn( """Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """ """be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , lowerCAmelCase_ , )
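The shim above only re-exports the class and emits a deprecation warning; the replacement import it recommends is the one-liner below.

from transformers import TFGenerationMixin  # preferred over transformers.generation_tf_utils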
"""simple docstring""" import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, TaForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeqaSeqDataset, SeqaSeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'summarization' _SCREAMING_SNAKE_CASE = ['loss'] _SCREAMING_SNAKE_CASE = ROUGE_KEYS _SCREAMING_SNAKE_CASE = 'rouge2' def __init__( self , lowercase , **lowercase ) -> str: if hparams.sortish_sampler and hparams.gpus > 1: lowerCAmelCase = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" ) if hparams.sortish_sampler: raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" ) super().__init__(lowercase , num_labels=lowercase , mode=self.mode , **lowercase ) use_task_specific_params(self.model , """summarization""" ) save_git_info(self.hparams.output_dir ) lowerCAmelCase = Path(self.output_dir ) / """metrics.json""" lowerCAmelCase = Path(self.output_dir ) / """hparams.pkl""" pickle_save(self.hparams , self.hparams_save_path ) lowerCAmelCase = 0 lowerCAmelCase = defaultdict(lowercase ) lowerCAmelCase = self.config.model_type lowerCAmelCase = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size lowerCAmelCase = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } lowerCAmelCase = { """train""": self.hparams.n_train, """val""": self.hparams.n_val, """test""": self.hparams.n_test, } lowerCAmelCase = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} lowerCAmelCase = { """train""": self.hparams.max_target_length, """val""": self.hparams.val_max_target_length, """test""": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f'target_lens: {self.target_lens}' assert self.target_lens["train"] <= self.target_lens["test"], f'target_lens: {self.target_lens}' if self.hparams.freeze_embeds: freeze_embeds(self.model ) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder() ) assert_all_frozen(self.model.get_encoder() ) lowerCAmelCase = get_git_info()["""repo_sha"""] lowerCAmelCase = hparams.num_workers lowerCAmelCase = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowercase ): lowerCAmelCase = self.tokenizer.lang_code_to_id[hparams.tgt_lang] lowerCAmelCase = self.decoder_start_token_id lowerCAmelCase = ( SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) 
else LegacySeqaSeqDataset ) lowerCAmelCase = False lowerCAmelCase = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: lowerCAmelCase = self.hparams.eval_max_gen_length else: lowerCAmelCase = self.model.config.max_length lowerCAmelCase = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def _snake_case ( self , lowercase ) -> Dict[str, List[str]]: lowerCAmelCase = { k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items() } save_json(lowercase , Path(self.output_dir ) / """text_batch.json""" ) save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" ) lowerCAmelCase = True return readable_batch def _snake_case ( self , lowercase , **lowercase ) -> Union[str, Any]: return self.model(lowercase , **lowercase ) def _snake_case ( self , lowercase ) -> Union[str, Any]: lowerCAmelCase = self.tokenizer.batch_decode( lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) return lmap(str.strip , lowercase ) def _snake_case ( self , lowercase ) -> Tuple: lowerCAmelCase = self.tokenizer.pad_token_id lowerCAmelCase , lowerCAmelCase = batch["""input_ids"""], batch["""attention_mask"""] lowerCAmelCase = batch["""labels"""] if isinstance(self.model , lowercase ): lowerCAmelCase = self.model._shift_right(lowercase ) else: lowerCAmelCase = shift_tokens_right(lowercase , lowercase ) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero lowerCAmelCase = decoder_input_ids self.save_readable_batch(lowercase ) lowerCAmelCase = self(lowercase , attention_mask=lowercase , decoder_input_ids=lowercase , use_cache=lowercase ) lowerCAmelCase = outputs["""logits"""] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id lowerCAmelCase = nn.CrossEntropyLoss(ignore_index=lowercase ) assert lm_logits.shape[-1] == self.vocab_size lowerCAmelCase = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) ) else: lowerCAmelCase = nn.functional.log_softmax(lowercase , dim=-1 ) lowerCAmelCase , lowerCAmelCase = label_smoothed_nll_loss( lowercase , lowercase , self.hparams.label_smoothing , ignore_index=lowercase ) return (loss,) @property def _snake_case ( self ) -> int: return self.tokenizer.pad_token_id def _snake_case ( self , lowercase , lowercase ) -> Dict: lowerCAmelCase = self._step(lowercase ) lowerCAmelCase = dict(zip(self.loss_names , lowercase ) ) # tokens per batch lowerCAmelCase = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum() lowerCAmelCase = batch["""input_ids"""].shape[0] lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).sum() lowerCAmelCase = batch["""input_ids"""].eq(self.pad ).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def _snake_case ( self , lowercase , lowercase ) -> Dict: return self._generative_step(lowercase ) def _snake_case ( self , lowercase , lowercase="val" ) -> Dict: self.step_count += 1 lowerCAmelCase = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names} lowerCAmelCase = losses["""loss"""] lowerCAmelCase = { k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""] } lowerCAmelCase = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else 
losses[self.val_metric] ) lowerCAmelCase = torch.tensor(lowercase ).type_as(lowercase ) generative_metrics.update({k: v.item() for k, v in losses.items()} ) losses.update(lowercase ) lowerCAmelCase = {f'{prefix}_avg_{k}': x for k, x in losses.items()} lowerCAmelCase = self.step_count self.metrics[prefix].append(lowercase ) # callback writes this to self.metrics_save_path lowerCAmelCase = flatten_list([x["""preds"""] for x in outputs] ) return { "log": all_metrics, "preds": preds, f'{prefix}_loss': loss, f'{prefix}_{self.val_metric}': metric_tensor, } def _snake_case ( self , lowercase , lowercase ) -> Dict: return calculate_rouge(lowercase , lowercase ) def _snake_case ( self , lowercase ) -> dict: lowerCAmelCase = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') lowerCAmelCase = self.model.generate( batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=lowercase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , ) lowerCAmelCase = (time.time() - ta) / batch["""input_ids"""].shape[0] lowerCAmelCase = self.ids_to_clean_text(lowercase ) lowerCAmelCase = self.ids_to_clean_text(batch["""labels"""] ) lowerCAmelCase = self._step(lowercase ) lowerCAmelCase = dict(zip(self.loss_names , lowercase ) ) lowerCAmelCase = self.calc_generative_metrics(lowercase , lowercase ) lowerCAmelCase = np.mean(lmap(lowercase , lowercase ) ) base_metrics.update(gen_time=lowercase , gen_len=lowercase , preds=lowercase , target=lowercase , **lowercase ) return base_metrics def _snake_case ( self , lowercase , lowercase ) -> Dict: return self._generative_step(lowercase ) def _snake_case ( self , lowercase ) -> int: return self.validation_epoch_end(lowercase , prefix="""test""" ) def _snake_case ( self , lowercase ) -> SeqaSeqDataset: lowerCAmelCase = self.n_obs[type_path] lowerCAmelCase = self.target_lens[type_path] lowerCAmelCase = self.dataset_class( self.tokenizer , type_path=lowercase , n_obs=lowercase , max_target_length=lowercase , **self.dataset_kwargs , ) return dataset def _snake_case ( self , lowercase , lowercase , lowercase = False ) -> DataLoader: lowerCAmelCase = self.get_dataset(lowercase ) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": lowerCAmelCase = dataset.make_sortish_sampler(lowercase , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": lowerCAmelCase = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 ) return DataLoader( lowercase , batch_sampler=lowercase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , ) else: return DataLoader( lowercase , batch_size=lowercase , collate_fn=dataset.collate_fn , shuffle=lowercase , num_workers=self.num_workers , sampler=lowercase , ) def _snake_case ( self ) -> DataLoader: lowerCAmelCase = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=lowercase ) return dataloader def _snake_case ( self ) -> DataLoader: return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size ) def _snake_case ( self ) -> DataLoader: return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size ) @staticmethod def 
_snake_case ( lowercase , lowercase ) -> Optional[int]: BaseTransformer.add_model_specific_args(lowercase , lowercase ) add_generic_args(lowercase , lowercase ) parser.add_argument( """--max_source_length""" , default=1_024 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--max_target_length""" , default=56 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--val_max_target_length""" , default=142 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--test_max_target_length""" , default=142 , type=lowercase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument("""--freeze_encoder""" , action="""store_true""" ) parser.add_argument("""--freeze_embeds""" , action="""store_true""" ) parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=lowercase ) parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=lowercase ) parser.add_argument("""--max_tokens_per_batch""" , type=lowercase , default=lowercase ) parser.add_argument("""--logger_name""" , type=lowercase , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" ) parser.add_argument("""--n_train""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_val""" , type=lowercase , default=500 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--n_test""" , type=lowercase , default=-1 , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument( """--task""" , type=lowercase , default="""summarization""" , required=lowercase , help="""# examples. -1 means use all.""" ) parser.add_argument("""--label_smoothing""" , type=lowercase , default=0.0 , required=lowercase ) parser.add_argument("""--src_lang""" , type=lowercase , default="""""" , required=lowercase ) parser.add_argument("""--tgt_lang""" , type=lowercase , default="""""" , required=lowercase ) parser.add_argument("""--eval_beams""" , type=lowercase , default=lowercase , required=lowercase ) parser.add_argument( """--val_metric""" , type=lowercase , default=lowercase , required=lowercase , choices=["""bleu""", """rouge2""", """loss""", None] ) parser.add_argument("""--eval_max_gen_length""" , type=lowercase , default=lowercase , help="""never generate more than n tokens""" ) parser.add_argument("""--save_top_k""" , type=lowercase , default=1 , required=lowercase , help="""How many checkpoints to save""" ) parser.add_argument( """--early_stopping_patience""" , type=lowercase , default=-1 , required=lowercase , help=( """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So""" """ val_check_interval will effect it.""" ) , ) return parser class lowercase ( _UpperCAmelCase ): _SCREAMING_SNAKE_CASE = 'translation' _SCREAMING_SNAKE_CASE = ['loss'] _SCREAMING_SNAKE_CASE = ['bleu'] _SCREAMING_SNAKE_CASE = 'bleu' def __init__( self , lowercase , **lowercase ) -> Union[str, Any]: super().__init__(lowercase , **lowercase ) lowerCAmelCase = hparams.src_lang lowerCAmelCase = hparams.tgt_lang def _snake_case ( self , lowercase , lowercase ) -> dict: return calculate_bleu(lowercase , lowercase ) def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : int=None ): '''simple docstring''' Path(args.output_dir ).mkdir(exist_ok=SCREAMING_SNAKE_CASE ) check_output_dir(SCREAMING_SNAKE_CASE , expected_items=3 ) if model is None: if "summarization" in args.task: lowerCAmelCase = SummarizationModule(SCREAMING_SNAKE_CASE ) else: lowerCAmelCase = TranslationModule(SCREAMING_SNAKE_CASE ) lowerCAmelCase = Path(args.data_dir ).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir ).startswith("""/tmp""" ) or str(args.output_dir ).startswith("""/var""" ) ): lowerCAmelCase = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger lowerCAmelCase = os.environ.get("""WANDB_PROJECT""" , SCREAMING_SNAKE_CASE ) lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=SCREAMING_SNAKE_CASE ) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger lowerCAmelCase = WandbLogger(name=model.output_dir.name , project=F'hf_{dataset}' ) if args.early_stopping_patience >= 0: lowerCAmelCase = get_early_stopping_callback(model.val_metric , args.early_stopping_patience ) else: lowerCAmelCase = False lowerCAmelCase = args.val_metric == """loss""" lowerCAmelCase = generic_train( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback( args.output_dir , model.val_metric , args.save_top_k , SCREAMING_SNAKE_CASE ) , early_stopping_callback=SCREAMING_SNAKE_CASE , logger=SCREAMING_SNAKE_CASE , ) pickle_save(model.hparams , model.output_dir / """hparams.pkl""" ) if not args.do_predict: return model lowerCAmelCase = """""" lowerCAmelCase = sorted(glob.glob(os.path.join(args.output_dir , """*.ckpt""" ) , recursive=SCREAMING_SNAKE_CASE ) ) if checkpoints: lowerCAmelCase = checkpoints[-1] lowerCAmelCase = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams ) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() SCREAMING_SNAKE_CASE__ = pl.Trainer.add_argparse_args(parser) SCREAMING_SNAKE_CASE__ = SummarizationModule.add_model_specific_args(parser, os.getcwd()) SCREAMING_SNAKE_CASE__ = parser.parse_args() main(args)
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig lowerCamelCase__ = { """albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""", """albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""", """albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""", """albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""", """albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""", """albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""", """albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""", """albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): __lowerCamelCase : Optional[Any] ='albert' def __init__( self : Optional[Any] , __lowercase : Union[str, Any]=30000 , __lowercase : List[str]=128 , __lowercase : Optional[Any]=4096 , __lowercase : Dict=12 , __lowercase : Any=1 , __lowercase : Optional[Any]=64 , __lowercase : Any=16384 , __lowercase : Any=1 , __lowercase : Union[str, Any]="gelu_new" , __lowercase : List[str]=0 , __lowercase : int=0 , __lowercase : Dict=512 , __lowercase : str=2 , __lowercase : List[str]=0.02 , __lowercase : Union[str, Any]=1E-12 , __lowercase : int=0.1 , __lowercase : Any="absolute" , __lowercase : Optional[int]=0 , __lowercase : Dict=2 , __lowercase : Optional[Any]=3 , **__lowercase : Any , ): '''simple docstring''' super().__init__(pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) __a = vocab_size __a = embedding_size __a = hidden_size __a = num_hidden_layers __a = num_hidden_groups __a = num_attention_heads __a = inner_group_num __a = hidden_act __a = intermediate_size __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = type_vocab_size __a = initializer_range __a = layer_norm_eps __a = classifier_dropout_prob __a = position_embedding_type class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ): @property def UpperCamelCase_ ( self : List[Any] ): '''simple docstring''' if self.task == "multiple-choice": __a = {0: """batch""", 1: """choice""", 2: """sequence"""} else: __a = {0: """batch""", 1: """sequence"""} return OrderedDict( [ ("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis), ] )
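Despite the obfuscated class name, the file above mirrors the ALBERT configuration; a minimal sketch using the upstream transformers name, with illustrative values only.

from transformers import AlbertConfig

config = AlbertConfig(
    vocab_size=30000,
    embedding_size=128,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
)
print(config.hidden_act)  # "gelu_new" by default, as in the signature above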
'''simple docstring''' import json import os from typing import Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { 'vocab_file': 'vocab.json', 'tokenizer_config_file': 'tokenizer_config.json', 'merges_file': 'merges.txt', } UpperCAmelCase = { 'vocab_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json' ), }, 'tokenizer_config_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json' ), }, 'merges_file': { 'facebook/s2t-wav2vec2-large-en-de': ( 'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt' ), }, } UpperCAmelCase = '</w>' UpperCAmelCase = '@@ ' def _snake_case ( _SCREAMING_SNAKE_CASE : List[Any] ) -> str: """simple docstring""" lowerCAmelCase = set() lowerCAmelCase = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase = char return pairs # Speech2Text2 has no max input length UpperCAmelCase = {'facebook/s2t-wav2vec2-large-en-de': 1024} class __snake_case( _lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : Tuple = VOCAB_FILES_NAMES UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCAmelCase : Any = ["input_ids", "attention_mask"] def __init__( self , A_ , A_="<s>" , A_="<pad>" , A_="</s>" , A_="<unk>" , A_=False , A_=None , **A_ , ) -> Tuple: super().__init__( unk_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , do_lower_case=A_ , **A_ , ) lowerCAmelCase = do_lower_case with open(A_ , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase = json.load(A_ ) lowerCAmelCase = {v: k for k, v in self.encoder.items()} if merges_file is None: logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.' 
) lowerCAmelCase = None lowerCAmelCase = None else: with open(A_ , encoding="""utf-8""" ) as merges_handle: lowerCAmelCase = merges_handle.read().split("""\n""" )[:-1] lowerCAmelCase = [tuple(merge.split()[:2] ) for merge in merges] lowerCAmelCase = dict(zip(A_ , range(len(A_ ) ) ) ) lowerCAmelCase = {} @property def __snake_case ( self ) -> int: return len(self.decoder ) def __snake_case ( self ) -> Dict: return dict(self.encoder , **self.added_tokens_encoder ) def __snake_case ( self , A_ ) -> Optional[Any]: lowerCAmelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,) if token in self.cache: return self.cache[token] lowerCAmelCase = get_pairs(A_ ) if not pairs: return token while True: lowerCAmelCase = min(A_ , key=lambda A_ : self.bpe_ranks.get(A_ , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase, lowerCAmelCase = bigram lowerCAmelCase = [] lowerCAmelCase = 0 while i < len(A_ ): try: lowerCAmelCase = word.index(A_ , A_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase = j if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase = tuple(A_ ) lowerCAmelCase = new_word if len(A_ ) == 1: break else: lowerCAmelCase = get_pairs(A_ ) lowerCAmelCase = """ """.join(A_ ) if word == "\n " + BPE_TOKEN_MERGES: lowerCAmelCase = """\n""" + BPE_TOKEN_MERGES if word.endswith(A_ ): lowerCAmelCase = word.replace(A_ , """""" ) lowerCAmelCase = word.replace(""" """ , A_ ) lowerCAmelCase = word return word def __snake_case ( self , A_ ) -> List[Any]: if self.bpe_ranks is None: raise ValueError( """This tokenizer was instantiated without a `merges.txt` file, so""" """ that it can only be used for decoding, not for encoding.""" """Make sure to provide `merges.txt` file at instantiation to enable """ """encoding.""" ) if self.do_lower_case: lowerCAmelCase = text.lower() lowerCAmelCase = text.split() lowerCAmelCase = [] for token in text: if token: split_tokens.extend(list(self.bpe(A_ ).split(""" """ ) ) ) return split_tokens def __snake_case ( self , A_ ) -> int: return self.encoder.get(A_ , self.encoder.get(self.unk_token ) ) def __snake_case ( self , A_ ) -> str: lowerCAmelCase = self.decoder.get(A_ , self.unk_token ) return result def __snake_case ( self , A_ ) -> str: lowerCAmelCase = """ """.join(A_ ) # make sure @@ tokens are concatenated lowerCAmelCase = """""".join(string.split(A_ ) ) return string def __snake_case ( self , A_ , A_ = None ) -> Tuple[str]: if not os.path.isdir(A_ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return lowerCAmelCase = os.path.join( A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase = os.path.join( A_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(A_ , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=A_ , ensure_ascii=A_ ) + """\n""" ) lowerCAmelCase = 0 if self.bpe_ranks is None: return (vocab_file,) with open(A_ , """w""" , encoding="""utf-8""" ) as writer: for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A_ : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.' 
""" Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase = token_index writer.write(""" """.join(A_ ) + """\n""" ) index += 1 return (vocab_file, merges_file)
'''simple docstring''' from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { 'huggingface/time-series-transformer-tourism-monthly': ( 'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json' ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class __snake_case( _lowerCAmelCase ): '''simple docstring''' UpperCAmelCase : List[Any] = "time_series_transformer" UpperCAmelCase : int = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers", } def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = [1, 2, 3, 4, 5, 6, 7] , A_ = "mean" , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 32 , A_ = 32 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = True , A_ = "gelu" , A_ = 64 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.0_2 , A_=True , **A_ , ) -> Optional[Any]: # time series specific configuration lowerCAmelCase = prediction_length lowerCAmelCase = context_length or prediction_length lowerCAmelCase = distribution_output lowerCAmelCase = loss lowerCAmelCase = input_size lowerCAmelCase = num_time_features lowerCAmelCase = lags_sequence lowerCAmelCase = scaling lowerCAmelCase = num_dynamic_real_features lowerCAmelCase = num_static_real_features lowerCAmelCase = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(A_ ) != num_static_categorical_features: raise ValueError( """The cardinality should be a list of the same length as `num_static_categorical_features`""" ) lowerCAmelCase = cardinality else: lowerCAmelCase = [0] if embedding_dimension and num_static_categorical_features > 0: if len(A_ ) != num_static_categorical_features: raise ValueError( """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" ) lowerCAmelCase = embedding_dimension else: lowerCAmelCase = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality] lowerCAmelCase = num_parallel_samples # Transformer architecture configuration lowerCAmelCase = input_size * len(A_ ) + self._number_of_features lowerCAmelCase = d_model lowerCAmelCase = encoder_attention_heads lowerCAmelCase = decoder_attention_heads lowerCAmelCase = encoder_ffn_dim lowerCAmelCase = decoder_ffn_dim lowerCAmelCase = encoder_layers lowerCAmelCase = decoder_layers lowerCAmelCase = dropout lowerCAmelCase = attention_dropout lowerCAmelCase = activation_dropout lowerCAmelCase = encoder_layerdrop lowerCAmelCase = decoder_layerdrop lowerCAmelCase = activation_function lowerCAmelCase = init_std lowerCAmelCase = use_cache super().__init__(is_encoder_decoder=A_ , **A_ ) @property def __snake_case ( self ) -> int: return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
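A minimal sketch of the fallback logic in __init__ above, via the upstream class name; values are illustrative.

from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(prediction_length=24)
print(config.context_length)  # falls back to prediction_length -> 24
print(config.lags_sequence)   # default [1, 2, 3, 4, 5, 6, 7]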
'''simple docstring''' import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase_ ( _lowerCAmelCase ): """simple docstring""" lowercase = "sew" def __init__( self : Union[str, Any] , snake_case_ : Any=32 , snake_case_ : Dict=768 , snake_case_ : Dict=12 , snake_case_ : Dict=12 , snake_case_ : List[Any]=3_072 , snake_case_ : Tuple=2 , snake_case_ : List[str]="gelu" , snake_case_ : Optional[Any]=0.1 , snake_case_ : List[Any]=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : List[str]=0.0 , snake_case_ : int=0.1 , snake_case_ : Union[str, Any]=0.1 , snake_case_ : List[str]=0.02 , snake_case_ : Optional[Any]=1E-5 , snake_case_ : Tuple="group" , snake_case_ : Optional[Any]="gelu" , snake_case_ : Dict=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , snake_case_ : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , snake_case_ : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , snake_case_ : str=False , snake_case_ : Optional[int]=128 , snake_case_ : List[Any]=16 , snake_case_ : str=True , snake_case_ : str=0.05 , snake_case_ : int=10 , snake_case_ : List[Any]=2 , snake_case_ : str=0.0 , snake_case_ : str=10 , snake_case_ : int=0 , snake_case_ : int="mean" , snake_case_ : str=False , snake_case_ : Optional[Any]=False , snake_case_ : Optional[Any]=256 , snake_case_ : int=0 , snake_case_ : Tuple=1 , snake_case_ : Dict=2 , **snake_case_ : Any , ): super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase ) snake_case__ : Any = hidden_size snake_case__ : List[Any] = feat_extract_norm snake_case__ : Optional[Any] = feat_extract_activation snake_case__ : Dict = list(_lowercase ) snake_case__ : List[str] = list(_lowercase ) snake_case__ : Any = list(_lowercase ) snake_case__ : Dict = conv_bias snake_case__ : str = num_conv_pos_embeddings snake_case__ : Optional[Any] = num_conv_pos_embedding_groups snake_case__ : Optional[int] = len(self.conv_dim ) snake_case__ : str = num_hidden_layers snake_case__ : Any = intermediate_size snake_case__ : Union[str, Any] = squeeze_factor snake_case__ : Optional[Any] = hidden_act snake_case__ : Optional[Any] = num_attention_heads snake_case__ : List[str] = hidden_dropout snake_case__ : Optional[Any] = attention_dropout snake_case__ : List[Any] = activation_dropout snake_case__ : Optional[int] = feat_proj_dropout snake_case__ : Dict = final_dropout snake_case__ : Optional[Any] = layerdrop snake_case__ : Union[str, Any] = layer_norm_eps snake_case__ : Tuple = initializer_range snake_case__ : Optional[Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)" f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case__ : Optional[int] = apply_spec_augment snake_case__ : Dict = mask_time_prob snake_case__ : Dict = mask_time_length snake_case__ : Union[str, Any] = mask_time_min_masks snake_case__ : Union[str, Any] = mask_feature_prob snake_case__ : Any = mask_feature_length snake_case__ : int = mask_feature_min_masks # ctc loss snake_case__ : Optional[int] = ctc_loss_reduction snake_case__ : Optional[int] = ctc_zero_infinity # sequence classification snake_case__ : List[Any] = use_weighted_layer_sum snake_case__ : List[Any] = classifier_proj_size @property def lowerCamelCase ( self : int ): return functools.reduce(operator.mul , self.conv_stride , 1 )
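A short sketch of the derived quantities the class above exposes, using the upstream transformers name; defaults match the signature shown.

from transformers import SEWConfig

config = SEWConfig()
print(config.num_feat_extract_layers)  # 13, the length of conv_dim
print(config.inputs_to_logits_ratio)   # product of conv_stride = 320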
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowercase : int = {'processing_wav2vec2_with_lm': ['Wav2Vec2ProcessorWithLM']} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo _lowercase: List[Any] = '''\ @misc{wu2016googles, title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation}, author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes and Jeffrey Dean}, year={2016}, eprint={1609.08144}, archivePrefix={arXiv}, primaryClass={cs.CL} } ''' _lowercase: Union[str, Any] = '''\ The BLEU score has some undesirable properties when used for single sentences, as it was designed to be a corpus measure. We therefore use a slightly different score for our RL experiments which we call the \'GLEU score\'. For the GLEU score, we record all sub-sequences of 1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then compute a recall, which is the ratio of the number of matching n-grams to the number of total n-grams in the target (ground truth) sequence, and a precision, which is the ratio of the number of matching n-grams to the number of total n-grams in the generated output sequence. Then GLEU score is simply the minimum of recall and precision. This GLEU score\'s range is always between 0 (no matches) and 1 (all match) and it is symmetrical when switching output and target. According to our experiments, GLEU score correlates quite well with the BLEU metric on a corpus level but does not have its drawbacks for our per sentence reward objective. ''' _lowercase: Tuple = '''\ Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references. Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values. Args: predictions (list of str): list of translations to score. Each translation should be tokenized into a list of tokens. references (list of list of str): list of lists of references for each translation. Each reference should be tokenized into a list of tokens. min_len (int): The minimum order of n-gram this function should extract. Defaults to 1. max_len (int): The maximum order of n-gram this function should extract. Defaults to 4. Returns: \'google_bleu\': google_bleu score Examples: Example 1: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... 
\'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.44 Example 2: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references) >>> print(round(results["google_bleu"], 2)) 0.61 Example 3: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2) >>> print(round(results["google_bleu"], 2)) 0.53 Example 4: >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\', ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\'] >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\', ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\', ... 
\'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\'] >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\', ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\', ... \'heed\', \'the\', \'cat\', \'commands\'] >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\', ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\', ... \'of\', \'the\', \'cat\'] >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\', ... \'interested\', \'in\', \'world\', \'history\'] >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\', ... \'because\', \'he\', \'read\', \'the\', \'book\'] >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]] >>> hypotheses = [hyp1, hyp2] >>> google_bleu = datasets.load_metric("google_bleu") >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6) >>> print(round(results["google_bleu"], 2)) 0.4 ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): """simple docstring""" def UpperCamelCase_ (self ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ), "references": datasets.Sequence( datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ), } ) , ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = 1 , lowerCamelCase_ = 4 , ): """simple docstring""" return { "google_bleu": gleu_score.corpus_gleu( list_of_references=lowerCAmelCase__ , hypotheses=lowerCAmelCase__ , min_len=lowerCAmelCase__ , max_len=lowerCAmelCase__ ) }
import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness _lowercase: Optional[Any] = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n" _lowercase: Union[str, Any] = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n" _lowercase: Union[str, Any] = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n" _lowercase: List[Any] = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. 
For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n" _lowercase: List[Any] = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE." @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): """simple docstring""" def UpperCamelCase_ (self ): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("string" ) ), "references": datasets.Value("string" ), } ) , homepage="https://github.com/openai/human-eval" , codebase_urls=["https://github.com/openai/human-eval"] , reference_urls=["https://github.com/openai/human-eval"] , license=_LICENSE , ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=[1, 10, 100] , lowerCamelCase_=4 , lowerCamelCase_=3.0 ): """simple docstring""" if os.getenv("HF_ALLOW_CODE_EVAL" , 0 ) != "1": raise ValueError(_WARNING ) if os.name == "nt": raise NotImplementedError("This metric is currently not supported on Windows." 
) with ThreadPoolExecutor(max_workers=lowerCamelCase_ ) as executor: a = [] a = Counter() a = 0 a = defaultdict(lowerCamelCase_ ) for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase_ , lowerCamelCase_ ) ): for candidate in candidates: a = candidate + "\n" + test_case a = (test_program, timeout, task_id, completion_id[task_id]) a = executor.submit(lowerCamelCase_ , *lowerCamelCase_ ) futures.append(lowerCamelCase_ ) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(lowerCamelCase_ ): a = future.result() results[result["task_id"]].append((result["completion_id"], result) ) a , a = [], [] for result in results.values(): result.sort() a = [r[1]["passed"] for r in result] total.append(len(lowerCamelCase_ ) ) correct.append(sum(lowerCamelCase_ ) ) a = np.array(lowerCamelCase_ ) a = np.array(lowerCamelCase_ ) a = k a = {F'''pass@{k}''': estimate_pass_at_k(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).mean() for k in ks if (total >= k).all()} return pass_at_k, results def a( A : Optional[Any] , A : str , A : Dict ) -> Optional[int]: """simple docstring""" def estimator(A : int , A : int , A : int ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(A , A ): a = itertools.repeat(A , len(A ) ) else: assert len(A ) == len(A ) a = iter(A ) return np.array([estimator(int(A ) , int(A ) , A ) for n, c in zip(A , A )] )
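A hedged check of the pass@k estimator defined above (named `a` in this dump, `estimate_pass_at_k` upstream): it evaluates pass@k = 1 - C(n - c, k) / C(n, k) in product form to avoid huge binomial coefficients.

from math import comb

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # n generated samples, c of them correct, k drawn without replacement
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

n, c, k = 10, 3, 2
assert abs(pass_at_k(n, c, k) - (1 - comb(n - c, k) / comb(n, k))) < 1e-9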
"""simple docstring""" def _snake_case ( lowercase__ : List[Any] , lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : Any ) -> int: '''simple docstring''' lowerCAmelCase_ :int = [False] * len(lowercase__ ) lowerCAmelCase_ :str = [] queue.append(lowercase__ ) lowerCAmelCase_ :Any = True while queue: lowerCAmelCase_ :Optional[int] = queue.pop(0 ) for ind in range(len(graph[u] ) ): if visited[ind] is False and graph[u][ind] > 0: queue.append(lowercase__ ) lowerCAmelCase_ :Union[str, Any] = True lowerCAmelCase_ :int = u return visited[t] def _snake_case ( lowercase__ : int , lowercase__ : Optional[int] , lowercase__ : str ) -> Dict: '''simple docstring''' lowerCAmelCase_ :List[Any] = [-1] * (len(lowercase__ )) lowerCAmelCase_ :str = 0 while bfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): lowerCAmelCase_ :List[str] = float("""Inf""" ) lowerCAmelCase_ :List[str] = sink while s != source: # Find the minimum value in select path lowerCAmelCase_ :Any = min(lowercase__ , graph[parent[s]][s] ) lowerCAmelCase_ :Union[str, Any] = parent[s] max_flow += path_flow lowerCAmelCase_ :Tuple = sink while v != source: lowerCAmelCase_ :List[str] = parent[v] graph[u][v] -= path_flow graph[v][u] += path_flow lowerCAmelCase_ :Union[str, Any] = parent[v] return max_flow __UpperCAmelCase = [ [0, 16, 13, 0, 0, 0], [0, 0, 10, 12, 0, 0], [0, 4, 0, 0, 14, 0], [0, 0, 9, 0, 0, 20], [0, 0, 0, 7, 0, 4], [0, 0, 0, 0, 0, 0], ] __UpperCAmelCase , __UpperCAmelCase = 0, 5 print(ford_fulkerson(graph, source, sink))
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class snake_case__ : def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase=99 , lowerCamelCase=32 , lowerCamelCase=5 , lowerCamelCase=4 , lowerCamelCase=37 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=50 , lowerCamelCase=0.02 , lowerCamelCase=True , lowerCamelCase=None , ): __a = parent __a = batch_size __a = seq_length __a = is_training __a = use_input_mask __a = vocab_size __a = hidden_size __a = num_hidden_layers __a = num_attention_heads __a = intermediate_size __a = hidden_act __a = hidden_dropout_prob __a = attention_probs_dropout_prob __a = max_position_embeddings __a = initializer_range __a = use_labels __a = scope def a__ ( self ): __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = None if self.use_input_mask: __a = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: __a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __a = self.get_config() return config, input_ids, input_mask, token_labels def a__ ( self ): return BertGenerationConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , ) def a__ ( self ): ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ) = self.prepare_config_and_inputs() __a = True __a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase ) __a = model(lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = BertGenerationEncoder(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def a__ ( self , 
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , **lowerCamelCase , ): __a = True __a = True __a = BertGenerationDecoder(config=lowerCamelCase ).to(lowerCamelCase ).eval() # first forward pass __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , use_cache=lowerCamelCase , ) __a = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids __a = ids_tensor((self.batch_size, 3) , config.vocab_size ) __a = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and __a = torch.cat([input_ids, next_tokens] , dim=-1 ) __a = torch.cat([input_mask, next_mask] , dim=-1 ) __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] __a = model( lowerCamelCase , attention_mask=lowerCamelCase , encoder_hidden_states=lowerCamelCase , encoder_attention_mask=lowerCamelCase , past_key_values=lowerCamelCase , output_hidden_states=lowerCamelCase , )["hidden_states"][0] # select random slice __a = ids_tensor((1,) , output_from_past.shape[-1] ).item() __a = output_from_no_past[:, -3:, random_slice_idx].detach() __a = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 ) ) def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase , ): __a = BertGenerationDecoder(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __a = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def a__ ( self ): __a , __a , __a , __a = self.prepare_config_and_inputs() __a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ ( snake_case_, snake_case_, snake_case_, unittest.TestCase ): _snake_case : Union[str, Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () _snake_case : Any = (BertGenerationDecoder,) if is_torch_available() else () _snake_case : Union[str, Any] = ( {"""feature-extraction""": BertGenerationEncoder, """text-generation""": BertGenerationDecoder} if is_torch_available() else {} ) def a__ ( self ): __a = BertGenerationEncoderTester(self ) __a = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 ) def a__ ( self ): self.config_tester.run_common_tests() def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def a__ ( self ): __a , __a , __a , __a = self.model_tester.prepare_config_and_inputs() __a = "bert" self.model_tester.create_and_check_model(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*lowerCamelCase ) def a__ ( self ): # This regression test was failing with PyTorch < 1.3 ( ( __a ) , ( __a ) , ( __a ) , ( __a ) , ( 
__a ) , ( __a ) , ) = self.model_tester.prepare_config_and_inputs_for_decoder() __a = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ) def a__ ( self ): __a = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*lowerCamelCase ) @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) self.assertIsNotNone(lowerCamelCase ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class snake_case__ ( unittest.TestCase ): @slow def a__ ( self ): __a = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" ) __a = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]] ) with torch.no_grad(): __a = model(lowerCamelCase )[0] __a = torch.Size([1, 8, 50358] ) self.assertEqual(output.shape , lowerCamelCase ) __a = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
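To exercise just this module outside the repository's usual pytest entry point, a programmatic unittest runner is enough. A minimal sketch, assuming torch is installed and the tests package is importable (the package-relative imports above require that):

import unittest as ut

suite = ut.defaultTestLoader.loadTestsFromTestCase(BertGenerationEncoderTest)
ut.TextTestRunner(verbosity=2).run(suite)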
261
0
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _A = logging.get_logger(__name__) _A = { """microsoft/git-base""": """https://huggingface.co/microsoft/git-base/resolve/main/config.json""", } class _lowerCamelCase ( a_ ): _lowerCamelCase :Tuple = "git_vision_model" def __init__( self : List[Any] , UpperCamelCase : str=7_68 , UpperCamelCase : Optional[Any]=30_72 , UpperCamelCase : Dict=12 , UpperCamelCase : Union[str, Any]=12 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Dict=2_24 , UpperCamelCase : str=16 , UpperCamelCase : Optional[int]="quick_gelu" , UpperCamelCase : List[str]=1E-5 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Any=0.02 , **UpperCamelCase : Optional[Any] , ) -> Tuple: """simple docstring""" super().__init__(**UpperCamelCase ) lowerCAmelCase__ : List[str] = hidden_size lowerCAmelCase__ : Any = intermediate_size lowerCAmelCase__ : str = num_hidden_layers lowerCAmelCase__ : Optional[Any] = num_attention_heads lowerCAmelCase__ : Any = num_channels lowerCAmelCase__ : Dict = patch_size lowerCAmelCase__ : Union[str, Any] = image_size lowerCAmelCase__ : int = initializer_range lowerCAmelCase__ : str = attention_dropout lowerCAmelCase__ : int = layer_norm_eps lowerCAmelCase__ : str = hidden_act @classmethod def _lowerCAmelCase ( cls : Union[str, Any] , UpperCamelCase : Union[str, os.PathLike] , **UpperCamelCase : Optional[Any] ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : str = cls.get_config_dict(UpperCamelCase , **UpperCamelCase ) # get the vision config dict if we are loading from GITConfig if config_dict.get("""model_type""" ) == "git": lowerCAmelCase__ : str = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCamelCase , **UpperCamelCase ) class _lowerCamelCase ( a_ ): _lowerCamelCase :Optional[Any] = "git" def __init__( self : Dict , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Optional[int]=3_05_22 , UpperCamelCase : Union[str, Any]=7_68 , UpperCamelCase : Dict=6 , UpperCamelCase : int=12 , UpperCamelCase : Optional[Any]=30_72 , UpperCamelCase : List[str]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Union[str, Any]=10_24 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Union[str, Any]=1E-1_2 , UpperCamelCase : List[Any]=0 , UpperCamelCase : int="absolute" , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Any=False , UpperCamelCase : str=1_01 , UpperCamelCase : Dict=1_02 , UpperCamelCase : Tuple=None , **UpperCamelCase : Tuple , ) -> List[Any]: """simple docstring""" super().__init__(bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , pad_token_id=UpperCamelCase , **UpperCamelCase ) if vision_config is None: lowerCAmelCase__ : Optional[int] = {} logger.info("""vision_config is None. 
initializing the GitVisionConfig with default values.""" ) lowerCAmelCase__ : Tuple = GitVisionConfig(**UpperCamelCase ) lowerCAmelCase__ : Optional[Any] = vocab_size lowerCAmelCase__ : Union[str, Any] = hidden_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Union[str, Any] = num_attention_heads lowerCAmelCase__ : int = hidden_act lowerCAmelCase__ : List[str] = intermediate_size lowerCAmelCase__ : Tuple = hidden_dropout_prob lowerCAmelCase__ : Any = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = max_position_embeddings lowerCAmelCase__ : Optional[Any] = initializer_range lowerCAmelCase__ : int = layer_norm_eps lowerCAmelCase__ : List[str] = position_embedding_type lowerCAmelCase__ : Dict = use_cache lowerCAmelCase__ : List[Any] = tie_word_embeddings lowerCAmelCase__ : List[Any] = num_image_with_embedding lowerCAmelCase__ : List[str] = bos_token_id lowerCAmelCase__ : int = eos_token_id def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : Tuple = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ : Tuple = self.vision_config.to_dict() lowerCAmelCase__ : Any = self.__class__.model_type return output
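A brief usage sketch of the two classes above, with illustrative values: a plain dict passed as `vision_config` is wrapped into a `GitVisionConfig`, and `to_dict` re-serializes it.

# Values here are assumptions chosen for the example.
config = GitConfig(vision_config={"image_size": 384, "patch_size": 32})
print(config.vision_config.image_size)                  # 384
print(config.to_dict()["vision_config"]["patch_size"])  # 32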
212
"""simple docstring""" import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _A = logging.get_logger(__name__) _A = { """vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_config_file""": """tokenizer_config.json""", } _A = { """vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""}, """merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""}, """tokenizer_config_file""": { """facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json""" }, } _A = {"""facebook/blenderbot-3B""": 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowercase_ ( ) -> Tuple: lowerCAmelCase__ : int = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) lowerCAmelCase__ : Any = bs[:] lowerCAmelCase__ : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(__UpperCAmelCase ) cs.append(2**8 + n ) n += 1 lowerCAmelCase__ : Dict = [chr(__UpperCAmelCase ) for n in cs] return dict(zip(__UpperCAmelCase , __UpperCAmelCase ) ) def lowercase_ ( __UpperCAmelCase ) -> List[Any]: lowerCAmelCase__ : List[Any] = set() lowerCAmelCase__ : Union[str, Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowerCAmelCase__ : Optional[Any] = char return pairs class _lowerCamelCase ( a_ ): _lowerCamelCase :Optional[Any] = VOCAB_FILES_NAMES _lowerCamelCase :List[Any] = PRETRAINED_VOCAB_FILES_MAP _lowerCamelCase :Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES _lowerCamelCase :Optional[Any] = ["input_ids", "attention_mask"] def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : Any="replace" , UpperCamelCase : Optional[Any]="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : Optional[int]="</s>" , UpperCamelCase : str="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : int="<pad>" , UpperCamelCase : Dict="<mask>" , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Optional[Any] , ) -> Any: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token lowerCAmelCase__ : Dict = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token lowerCAmelCase__ : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token # Mask token 
behave like a normal word, i.e. include the space before it lowerCAmelCase__ : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token super().__init__( errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , ) with open(UpperCamelCase , encoding="""utf-8""" ) as vocab_handle: lowerCAmelCase__ : Any = json.load(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = {v: k for k, v in self.encoder.items()} lowerCAmelCase__ : Dict = errors # how to handle errors in decoding lowerCAmelCase__ : Union[str, Any] = bytes_to_unicode() lowerCAmelCase__ : List[str] = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase , encoding="""utf-8""" ) as merges_handle: lowerCAmelCase__ : Optional[int] = merges_handle.read().split("""\n""" )[1:-1] lowerCAmelCase__ : Dict = [tuple(merge.split() ) for merge in bpe_merges] lowerCAmelCase__ : Any = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) lowerCAmelCase__ : Union[str, Any] = {} lowerCAmelCase__ : Dict = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowerCAmelCase__ : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" return len(self.encoder ) def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str ) -> Union[str, Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowerCAmelCase__ : Union[str, Any] = tuple(UpperCamelCase ) lowerCAmelCase__ : List[str] = get_pairs(UpperCamelCase ) if not pairs: return token while True: lowerCAmelCase__ : List[str] = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowerCAmelCase__ , lowerCAmelCase__ : str = bigram lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : List[str] = 0 while i < len(UpperCamelCase ): try: lowerCAmelCase__ : Optional[Any] = word.index(UpperCamelCase , UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowerCAmelCase__ : List[str] = j if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowerCAmelCase__ : List[Any] = tuple(UpperCamelCase ) lowerCAmelCase__ : Tuple = new_word if len(UpperCamelCase ) == 1: break else: lowerCAmelCase__ : Any = get_pairs(UpperCamelCase ) lowerCAmelCase__ : Tuple = """ """.join(UpperCamelCase ) lowerCAmelCase__ : Tuple = word return word def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : List[str] ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Tuple = [] for token in re.findall(self.pat , UpperCamelCase ): lowerCAmelCase__ : List[Any] = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode 
strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase ).split(""" """ ) ) return bpe_tokens def _lowerCAmelCase ( self : Optional[Any] , UpperCamelCase : Union[str, Any] ) -> Dict: """simple docstring""" return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) ) def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Optional[Any] ) -> Tuple: """simple docstring""" return self.decoder.get(UpperCamelCase ) def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : Optional[int] ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : List[str] = """""".join(UpperCamelCase ) lowerCAmelCase__ : List[str] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def _lowerCAmelCase ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return lowerCAmelCase__ : Union[str, Any] = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowerCAmelCase__ : int = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + """\n""" ) lowerCAmelCase__ : Optional[Any] = 0 with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" """ Please check that the tokenizer is not corrupted!""" ) lowerCAmelCase__ : Dict = token_index writer.write(""" """.join(UpperCamelCase ) + """\n""" ) index += 1 return vocab_file, merge_file def _lowerCAmelCase ( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1] def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id] lowerCAmelCase__ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowerCAmelCase ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int]=False , **UpperCamelCase : Union[str, Any] ) -> str: """simple docstring""" lowerCAmelCase__ : int = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase ) > 0 and not text[0].isspace()): lowerCAmelCase__ : Tuple = """ """ + text return (text, kwargs) def _lowerCAmelCase ( self : Optional[Any] , 
UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ) -> Any: """simple docstring""" return token_ids_a + [self.eos_token_id] def _lowerCAmelCase ( self : str , UpperCamelCase : "Conversation" ) -> List[int]: """simple docstring""" lowerCAmelCase__ : List[str] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text ) else: # Generated responses should contain them already. inputs.append(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = """ """.join(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = self.encode(UpperCamelCase ) if len(UpperCamelCase ) > self.model_max_length: lowerCAmelCase__ : List[str] = input_ids[-self.model_max_length :] logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" ) return input_ids
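Two quick checks of the module-level helpers above, needing no tokenizer files: the byte/unicode table covers all 256 byte values, and `get_pairs` yields the adjacent symbol pairs that BPE merges operate on.

# Illustrative sanity checks against the helpers defined in this module.
byte_map = bytes_to_unicode()
assert len(byte_map) == 256
print(get_pairs(("l", "o", "w")))  # {('l', 'o'), ('o', 'w')}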
212
1
"""simple docstring""" import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __magic_name__ = logging.get_logger(__name__) __magic_name__ = "https://openaipublic.azureedge.net/jukebox/models/" __magic_name__ = { "jukebox-1b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "1b_lyrics/prior_level_2.pth.tar", ], "jukebox-5b-lyrics": [ "5b/vqvae.pth.tar", "5b/prior_level_0.pth.tar", "5b/prior_level_1.pth.tar", "5b_lyrics/prior_level_2.pth.tar", ], } def _lowerCAmelCase ( UpperCamelCase_ ): if key.endswith(""".model.1.bias""" ) and len(key.split(""".""" ) ) > 10: __SCREAMING_SNAKE_CASE = key.replace(""".model.1.bias""" , """.conv1d_1.bias""" ) elif key.endswith(""".model.1.weight""" ) and len(key.split(""".""" ) ) > 10: __SCREAMING_SNAKE_CASE = key.replace(""".model.1.weight""" , """.conv1d_1.weight""" ) elif key.endswith(""".model.3.bias""" ) and len(key.split(""".""" ) ) > 10: __SCREAMING_SNAKE_CASE = key.replace(""".model.3.bias""" , """.conv1d_2.bias""" ) elif key.endswith(""".model.3.weight""" ) and len(key.split(""".""" ) ) > 10: __SCREAMING_SNAKE_CASE = key.replace(""".model.3.weight""" , """.conv1d_2.weight""" ) if "conditioner_blocks.0." in key: __SCREAMING_SNAKE_CASE = key.replace("""conditioner_blocks.0""" , """conditioner_blocks""" ) if "prime_prior" in key: __SCREAMING_SNAKE_CASE = key.replace("""prime_prior""" , """encoder""" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: __SCREAMING_SNAKE_CASE = key.replace(""".emb.""" , """.""" ) if key.endswith("""k""" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(""".k""" , """.codebook""" ) if "y_emb." in key: return key.replace("""y_emb.""" , """metadata_embedding.""" ) if "x_emb.emb." 
in key: __SCREAMING_SNAKE_CASE = key.replace("""0.x_emb.emb""" , """embed_tokens""" ) if "prime_state_ln" in key: return key.replace("""prime_state_ln""" , """encoder.final_layer_norm""" ) if ".ln" in key: return key.replace(""".ln""" , """.layer_norm""" ) if "_ln" in key: return key.replace("""_ln""" , """_layer_norm""" ) if "prime_state_proj" in key: return key.replace("""prime_state_proj""" , """encoder.proj_in""" ) if "prime_x_out" in key: return key.replace("""prime_x_out""" , """encoder.lm_head""" ) if "prior.x_out" in key: return key.replace("""x_out""" , """fc_proj_out""" ) if "x_emb" in key: return key.replace("""x_emb""" , """embed_tokens""" ) return key def _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = {} import re __SCREAMING_SNAKE_CASE = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile( r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile(r"""encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile( r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile(r"""decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile( r"""conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)""" ) __SCREAMING_SNAKE_CASE = re.compile(r"""conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)""" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) __SCREAMING_SNAKE_CASE = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}" __SCREAMING_SNAKE_CASE = re_encoder_block_conv_in.sub(UpperCamelCase_ , UpperCamelCase_ ) elif re_encoder_block_resnet.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_encoder_block_resnet.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) __SCREAMING_SNAKE_CASE = {"""1""": 1, """3""": 2}[groups[-2]] __SCREAMING_SNAKE_CASE = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}." 
__SCREAMING_SNAKE_CASE = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" __SCREAMING_SNAKE_CASE = prefix + resnet_block __SCREAMING_SNAKE_CASE = re_encoder_block_resnet.sub(UpperCamelCase_ , UpperCamelCase_ ) elif re_encoder_block_proj_out.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}" __SCREAMING_SNAKE_CASE = re_encoder_block_proj_out.sub(UpperCamelCase_ , UpperCamelCase_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 __SCREAMING_SNAKE_CASE = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}" __SCREAMING_SNAKE_CASE = re_decoder_block_conv_out.sub(UpperCamelCase_ , UpperCamelCase_ ) elif re_decoder_block_resnet.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_decoder_block_resnet.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = int(groups[2] ) * 2 + int(groups[3] ) - 2 __SCREAMING_SNAKE_CASE = {"""1""": 1, """3""": 2}[groups[-2]] __SCREAMING_SNAKE_CASE = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}." __SCREAMING_SNAKE_CASE = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" __SCREAMING_SNAKE_CASE = prefix + resnet_block __SCREAMING_SNAKE_CASE = re_decoder_block_resnet.sub(UpperCamelCase_ , UpperCamelCase_ ) elif re_decoder_block_proj_in.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}" __SCREAMING_SNAKE_CASE = re_decoder_block_proj_in.sub(UpperCamelCase_ , UpperCamelCase_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 __SCREAMING_SNAKE_CASE = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}" __SCREAMING_SNAKE_CASE = re_prior_cond_conv_out.sub(UpperCamelCase_ , UpperCamelCase_ ) elif re_prior_cond_resnet.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_prior_cond_resnet.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = int(groups[1] ) * 2 + int(groups[2] ) - 2 __SCREAMING_SNAKE_CASE = {"""1""": 1, """3""": 2}[groups[-2]] __SCREAMING_SNAKE_CASE = f"conditioner_blocks.upsampler.upsample_block.{block_index}." 
__SCREAMING_SNAKE_CASE = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}" __SCREAMING_SNAKE_CASE = prefix + resnet_block __SCREAMING_SNAKE_CASE = re_prior_cond_resnet.sub(UpperCamelCase_ , UpperCamelCase_ ) elif re_prior_cond_proj_in.fullmatch(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.match(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = regex_match.groups() __SCREAMING_SNAKE_CASE = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}" __SCREAMING_SNAKE_CASE = re_prior_cond_proj_in.sub(UpperCamelCase_ , UpperCamelCase_ ) # keep original key else: __SCREAMING_SNAKE_CASE = original_key __SCREAMING_SNAKE_CASE = replace_key(UpperCamelCase_ ) if f"{key_prefix}.{key}" not in model_state_dict or key is None: print(f"failed converting {original_key} to {key}, does not match" ) # handle missmatched shape elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape: __SCREAMING_SNAKE_CASE = model_state_dict[f"{key_prefix}.{key}"] print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" ) __SCREAMING_SNAKE_CASE = original_key __SCREAMING_SNAKE_CASE = original_key __SCREAMING_SNAKE_CASE = value return new_dict @torch.no_grad() def _lowerCAmelCase ( UpperCamelCase_=None , UpperCamelCase_=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ): __SCREAMING_SNAKE_CASE = requests.get(f"{PREFIX}{file}" , allow_redirects=UpperCamelCase_ ) os.makedirs(f"{pytorch_dump_folder_path}/" , exist_ok=UpperCamelCase_ ) open(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , """wb""" ).write(r.content ) __SCREAMING_SNAKE_CASE = MODEL_MAPPING[model_name.split("""/""" )[-1]] __SCREAMING_SNAKE_CASE = JukeboxConfig.from_pretrained(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = JukeboxModel(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = [] __SCREAMING_SNAKE_CASE = {} for i, dict_name in enumerate(UpperCamelCase_ ): __SCREAMING_SNAKE_CASE = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["""model"""] __SCREAMING_SNAKE_CASE = {} for k in old_dic.keys(): if k.endswith(""".b""" ): __SCREAMING_SNAKE_CASE = old_dic[k] elif k.endswith(""".w""" ): __SCREAMING_SNAKE_CASE = old_dic[k] elif "level_2" not in dict_name and "cond.model." 
in k: __SCREAMING_SNAKE_CASE = old_dic[k] else: __SCREAMING_SNAKE_CASE = old_dic[k] __SCREAMING_SNAKE_CASE = """vqvae""" if i == 0 else f"priors.{3 - i}" __SCREAMING_SNAKE_CASE = fix_jukebox_keys(UpperCamelCase_ , model.state_dict() , UpperCamelCase_ , UpperCamelCase_ ) weight_dict.append(UpperCamelCase_ ) __SCREAMING_SNAKE_CASE = weight_dict.pop(0 ) model.vqvae.load_state_dict(UpperCamelCase_ ) for i in range(len(UpperCamelCase_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(UpperCamelCase_ ).mkdir(exist_ok=UpperCamelCase_ ) with open(f"{pytorch_dump_folder_path}/mapping.json" , """w""" ) as txtfile: json.dump(UpperCamelCase_ , UpperCamelCase_ ) print(f"Saving model {model_name} to {pytorch_dump_folder_path}" ) model.save_pretrained(UpperCamelCase_ ) return weight_dict if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="jukebox-5b-lyrics", type=str, help="Name of the model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default="jukebox-5b-lyrics-converted", type=str, help="Path to the output PyTorch model directory.", ) __magic_name__ = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
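A quick illustration of the suffix rewriting done by `replace_key` above; the key itself is made up for the example, only its shape matters:

# Hypothetical key: deep enough (> 10 dot-separated parts) to trigger the
# first branch, so the ".model.1.bias" tail becomes ".conv1d_1.bias".
demo_key = "vqvae.encoders.0.level_blocks.0.model.0.1.model.1.model.1.bias"
print(replace_key(demo_key))
# vqvae.encoders.0.level_blocks.0.model.0.1.model.1.conv1d_1.bias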
100
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
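A hedged usage sketch, assuming network access to the Hub and one of the checkpoints listed above:

tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
ids = tokenizer("A short sentence.")["input_ids"]
# Sequences are wrapped as [CLS] ... [SEP], per build_inputs_with_special_tokens.
print(tokenizer.convert_ids_to_tokens(ids))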
83
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
324
"""simple docstring""" import os from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home _snake_case = HUGGINGFACE_HUB_CACHE _snake_case = 'config.json' _snake_case = 'diffusion_pytorch_model.bin' _snake_case = 'diffusion_flax_model.msgpack' _snake_case = 'model.onnx' _snake_case = 'diffusion_pytorch_model.safetensors' _snake_case = 'weights.pb' _snake_case = 'https://huggingface.co' _snake_case = default_cache_path _snake_case = 'diffusers_modules' _snake_case = os.getenv('HF_MODULES_CACHE', os.path.join(hf_cache_home, 'modules')) _snake_case = ['fp16', 'non-ema'] _snake_case = '.self_attn'
324
1
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted.
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
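A hedged usage sketch of the deprecated-flag remapping done in `__init__` above; the field values are illustrative, and the other fields come from the parent `BenchmarkArguments` dataclass:

args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[32],
    no_inference=True,  # deprecated flag, remapped to inference=False
)
print(args.inference)  # False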
37
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
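A hedged usage sketch via the high-level `pipeline` factory; this downloads a checkpoint, and the image path is a placeholder:

from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier("cat.png", candidate_labels=["cat", "dog"])  # "cat.png" is an assumed local file
print(preds[0]["label"], preds[0]["score"])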
37
1
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
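A hedged usage sketch of the processor's joint text/image path; the checkpoint name and dummy image are illustrative:

from PIL import Image
from transformers import AltCLIPProcessor

processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
batch = processor(
    text=["a photo of a cat"],
    images=Image.new("RGB", (224, 224)),  # blank placeholder image
    return_tensors="pt",
)
print(sorted(batch.keys()))  # includes input_ids, attention_mask, pixel_values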
267
import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _UpperCAmelCase ( _lowerCAmelCase , unittest.TestCase ): a__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline a__ : int = ["image_embeds", "negative_image_embeds", "image", "hint"] a__ : List[str] = ["image_embeds", "negative_image_embeds", "image", "hint"] a__ : Union[str, Any] = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] a__ : Optional[int] = False @property def a ( self : Dict ): return 32 @property def a ( self : str ): return 32 @property def a ( self : Union[str, Any] ): return self.time_input_dim @property def a ( self : Optional[Any] ): return self.time_input_dim * 4 @property def a ( self : List[Any] ): return 1_00 @property def a ( self : Optional[Any] ): torch.manual_seed(0 ) __UpperCAmelCase = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __UpperCAmelCase = UNetaDConditionModel(**_lowercase ) return model @property def a ( self : List[str] ): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def a ( self : int ): torch.manual_seed(0 ) __UpperCAmelCase = VQModel(**self.dummy_movq_kwargs ) return model def a ( self : List[Any] ): __UpperCAmelCase = self.dummy_unet __UpperCAmelCase = self.dummy_movq __UpperCAmelCase = { '''num_train_timesteps''': 10_00, '''beta_schedule''': '''linear''', '''beta_start''': 0.00_085, '''beta_end''': 0.012, '''clip_sample''': False, '''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } __UpperCAmelCase = DDIMScheduler(**_lowercase ) __UpperCAmelCase = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def a ( self : Dict , _lowercase : Tuple , _lowercase : Any=0 ): __UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowercase 
) ).to(_lowercase ) __UpperCAmelCase = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowercase ) # create init_image __UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) __UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] __UpperCAmelCase = Image.fromarray(np.uinta(_lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) ) # create hint __UpperCAmelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase ) if str(_lowercase ).startswith('''mps''' ): __UpperCAmelCase = torch.manual_seed(_lowercase ) else: __UpperCAmelCase = torch.Generator(device=_lowercase ).manual_seed(_lowercase ) __UpperCAmelCase = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def a ( self : str ): __UpperCAmelCase = '''cpu''' __UpperCAmelCase = self.get_dummy_components() __UpperCAmelCase = self.pipeline_class(**_lowercase ) __UpperCAmelCase = pipe.to(_lowercase ) pipe.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = pipe(**self.get_dummy_inputs(_lowercase ) ) __UpperCAmelCase = output.images __UpperCAmelCase = pipe( **self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0] __UpperCAmelCase = image[0, -3:, -3:, -1] __UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase = np.array( [0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): def a ( self : Dict ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a ( self : Tuple ): __UpperCAmelCase = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) __UpperCAmelCase = init_image.resize((5_12, 5_12) ) __UpperCAmelCase = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) __UpperCAmelCase = torch.from_numpy(np.array(_lowercase ) ).float() / 255.0 __UpperCAmelCase = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __UpperCAmelCase = '''A robot, 4k photo''' __UpperCAmelCase = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(_lowercase ) __UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) __UpperCAmelCase = pipeline.to(_lowercase ) pipeline.set_progress_bar_config(disable=_lowercase ) __UpperCAmelCase = torch.Generator(device='''cpu''' ).manual_seed(0 ) __UpperCAmelCase , __UpperCAmelCase = 
pipe_prior( _lowercase , image=_lowercase , strength=0.85 , generator=_lowercase , negative_prompt='''''' , ).to_tuple() __UpperCAmelCase = pipeline( image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , hint=_lowercase , generator=_lowercase , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='''np''' , ) __UpperCAmelCase = output.images[0] assert image.shape == (5_12, 5_12, 3) assert_mean_pixel_difference(_lowercase , _lowercase )
"""simple docstring""" import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def lowercase__ ( ): raise RuntimeError('''CUDA out of memory.''' ) class _UpperCAmelCase ( nn.Module ): def __init__( self : Optional[Any] ): super().__init__() __UpperCAmelCase = nn.Linear(3 , 4 ) __UpperCAmelCase = nn.BatchNormad(4 ) __UpperCAmelCase = nn.Linear(4 , 5 ) def a ( self : Optional[int] , _lowercase : Optional[Any] ): return self.lineara(self.batchnorm(self.lineara(_lowercase ) ) ) class _UpperCAmelCase ( unittest.TestCase ): def a ( self : List[str] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[int] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) def a ( self : Optional[int] ): __UpperCAmelCase = [] @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : str , _lowercase : List[str] ): nonlocal batch_sizes batch_sizes.append(_lowercase ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga __UpperCAmelCase , __UpperCAmelCase = mock_training_loop_function('''hello''' ) self.assertListEqual(_lowercase , [1_28, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, '''hello'''] ) def a ( self : Tuple ): @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(_lowercase : Optional[int] ): pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : List[Any] ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : List[Any] ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''No executable batch size found, reached zero.''' , cm.exception.args[0] ) def a ( self : Union[str, Any] ): @find_executable_batch_size(starting_batch_size=1_28 ) def mock_training_loop_function(_lowercase : Optional[Any] , _lowercase : List[str] , _lowercase : str ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(_lowercase ) as cm: mock_training_loop_function(1_28 , '''hello''' , '''world''' ) self.assertIn('''Batch size was passed into `f`''' , cm.exception.args[0] ) self.assertIn('''`f(arg1=\'hello\', arg2=\'world\')''' , cm.exception.args[0] ) def a ( self : Dict ): @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(_lowercase : int ): raise ValueError('''Oops, we had an error!''' ) with self.assertRaises(_lowercase ) as cm: mock_training_loop_function() self.assertIn('''Oops, we had an error!''' , cm.exception.args[0] ) @require_cuda def a ( self : str ): __UpperCAmelCase = torch.cuda.memory_allocated() __UpperCAmelCase = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , _lowercase ) __UpperCAmelCase = release_memory(_lowercase ) self.assertEqual(torch.cuda.memory_allocated() , _lowercase )
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class _UpperCAmelCase : def __init__( self : Optional[Any] , lowercase_ : Any , lowercase_ : Any=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Tuple=True , lowercase_ : str=True , lowercase_ : Tuple=99 , lowercase_ : List[str]=32 , lowercase_ : List[Any]=5 , lowercase_ : Tuple=4 , lowercase_ : Union[str, Any]=37 , lowercase_ : Optional[int]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Any=128 , lowercase_ : int=32 , lowercase_ : List[Any]=16 , lowercase_ : Dict=2 , lowercase_ : Any=0.02 , lowercase_ : List[Any]=3 , lowercase_ : Dict=4 , lowercase_ : List[Any]=None , ): snake_case_ : str = parent snake_case_ : str = batch_size snake_case_ : List[str] = seq_length snake_case_ : Union[str, Any] = is_training snake_case_ : Tuple = use_input_mask snake_case_ : List[str] = use_token_type_ids snake_case_ : Tuple = use_labels snake_case_ : Any = vocab_size snake_case_ : Optional[Any] = hidden_size snake_case_ : Any = num_hidden_layers snake_case_ : List[str] = num_attention_heads snake_case_ : Any = intermediate_size snake_case_ : Dict = hidden_act snake_case_ : Union[str, Any] = hidden_dropout_prob snake_case_ : Optional[int] = attention_probs_dropout_prob snake_case_ : List[Any] = max_position_embeddings snake_case_ : List[str] = type_vocab_size snake_case_ : List[str] = type_sequence_label_size snake_case_ : Optional[Any] = initializer_range snake_case_ : str = num_labels snake_case_ : Union[str, Any] = num_choices snake_case_ : int = scope def _snake_case ( self : Union[str, Any] ): snake_case_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case_ : int = None if self.use_input_mask: snake_case_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) snake_case_ : Optional[int] = None if self.use_token_type_ids: snake_case_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case_ : int = None snake_case_ : int = None snake_case_ : List[Any] = None if self.use_labels: snake_case_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case_ : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case_ : Tuple = ids_tensor([self.batch_size] , self.num_choices ) snake_case_ : List[str] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _snake_case ( self : List[str] ): return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , 
num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , ) def _snake_case ( self : int ): ( snake_case_ ) : str = self.prepare_config_and_inputs() snake_case_ : int = True snake_case_ : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) snake_case_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def _snake_case ( self : List[Any] , lowercase_ : Any , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Any , lowercase_ : int , lowercase_ : int ): snake_case_ : Dict = NezhaModel(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : List[str] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) snake_case_ : Optional[Any] = model(lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) snake_case_ : Tuple = model(lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : int , ): snake_case_ : Union[str, Any] = True snake_case_ : List[Any] = NezhaModel(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : List[str] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , encoder_attention_mask=lowerCamelCase__ , ) snake_case_ : Any = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , encoder_hidden_states=lowerCamelCase__ , ) snake_case_ : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _snake_case ( self : Optional[int] , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : int , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Optional[int] ): snake_case_ : List[str] = NezhaForMaskedLM(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : str = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _snake_case ( self : Dict , lowercase_ : str , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Any ): snake_case_ : List[Any] = NezhaForNextSentencePrediction(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : List[Any] = model( lowerCamelCase__ , 
attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def _snake_case ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : str ): snake_case_ : Any = NezhaForPreTraining(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : Dict = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , next_sentence_label=lowerCamelCase__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def _snake_case ( self : Optional[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Dict ): snake_case_ : str = NezhaForQuestionAnswering(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : Optional[int] = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , start_positions=lowerCamelCase__ , end_positions=lowerCamelCase__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _snake_case ( self : Dict , lowercase_ : Dict , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : int ): snake_case_ : Dict = self.num_labels snake_case_ : str = NezhaForSequenceClassification(lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : str = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Any ): snake_case_ : Optional[int] = self.num_labels snake_case_ : Tuple = NezhaForTokenClassification(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : int = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _snake_case ( self : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : str ): snake_case_ : Optional[Any] = self.num_choices snake_case_ : Union[str, Any] = NezhaForMultipleChoice(config=lowerCamelCase__ ) model.to(lowerCamelCase__ ) model.eval() snake_case_ : List[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case_ : Any = model( lowerCamelCase__ , attention_mask=lowerCamelCase__ , token_type_ids=lowerCamelCase__ , labels=lowerCamelCase__ , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _snake_case ( self : Tuple ): snake_case_ : Dict = self.prepare_config_and_inputs() ( snake_case_ ) : Tuple = config_and_inputs snake_case_ : Union[str, Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _UpperCAmelCase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase): _lowerCAmelCase : List[str] = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) _lowerCAmelCase : str = ( { "feature-extraction": NezhaModel, "fill-mask": NezhaForMaskedLM, "question-answering": NezhaForQuestionAnswering, "text-classification": NezhaForSequenceClassification, "token-classification": NezhaForTokenClassification, "zero-shot": NezhaForSequenceClassification, } if is_torch_available() else {} ) _lowerCAmelCase : List[str] = True def _snake_case ( self : Tuple , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Dict=False ): snake_case_ : str = super()._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ , return_labels=lowerCamelCase__ ) if return_labels: if model_class in get_values(lowerCamelCase__ ): snake_case_ : int = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase__ ) snake_case_ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase__ ) return inputs_dict def _snake_case ( self : Optional[int] ): snake_case_ : List[Any] = NezhaModelTester(self ) snake_case_ : Optional[int] = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 ) def _snake_case ( self : Any ): self.config_tester.run_common_tests() def _snake_case ( self : int ): snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase__ ) def _snake_case ( self : List[Any] ): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowerCamelCase__ ) def _snake_case ( self : List[Any] ): ( snake_case_ ) : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case_ : Union[str, Any] = None self.model_tester.create_and_check_model_as_decoder( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , ) def _snake_case ( self : List[str] ): snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase__ ) def _snake_case ( self : int ): snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase__ ) def _snake_case ( self : Optional[int] ): snake_case_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*lowerCamelCase__ ) def _snake_case ( self : Optional[int] ): snake_case_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ ) def _snake_case ( self : Optional[Any] ): snake_case_ : Union[str, Any] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowerCamelCase__ ) def _snake_case ( self : Optional[int] ): snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase__ ) def _snake_case ( self : int ): snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowerCamelCase__ ) @slow def _snake_case ( self : int ): for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ : Tuple = NezhaModel.from_pretrained(lowerCamelCase__ ) self.assertIsNotNone(lowerCamelCase__ ) @slow @require_torch_gpu def _snake_case ( self : Optional[int] ): snake_case_ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. if model_class == NezhaForMultipleChoice: return snake_case_ : List[str] = True snake_case_ : Dict = model_class(config=lowerCamelCase__ ) snake_case_ : List[str] = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) snake_case_ : Dict = torch.jit.trace( lowerCamelCase__ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(lowerCamelCase__ , os.path.join(lowerCamelCase__ , '''bert.pt''' ) ) snake_case_ : Tuple = torch.jit.load(os.path.join(lowerCamelCase__ , '''bert.pt''' ) , map_location=lowerCamelCase__ ) loaded(inputs_dict['''input_ids'''].to(lowerCamelCase__ ) , inputs_dict['''attention_mask'''].to(lowerCamelCase__ ) ) @require_torch class _UpperCAmelCase ( unittest.TestCase): @slow def _snake_case ( self : Optional[int] ): snake_case_ : Tuple = NezhaModel.from_pretrained('''sijunhe/nezha-cn-base''' ) snake_case_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) snake_case_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case_ : Tuple = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0] snake_case_ : Any = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , lowerCamelCase__ ) snake_case_ : Optional[Any] = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) ) @slow def _snake_case ( self : List[str] ): snake_case_ : Union[str, Any] = NezhaForMaskedLM.from_pretrained('''sijunhe/nezha-cn-base''' ) snake_case_ : str = torch.tensor([[0, 1, 2, 3, 4, 5]] ) snake_case_ : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case_ : List[str] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ )[0] snake_case_ : List[str] = torch.Size((1, 6, 21128) ) self.assertEqual(output.shape , lowerCamelCase__ ) snake_case_ : List[Any] = torch.tensor( [[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCamelCase__ , atol=1E-4 ) )
"""simple docstring""" import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def __lowercase ( _a = 3 ): if isinstance(_a , _a ): raise TypeError('''number of qubits must be a integer.''' ) if number_of_qubits <= 0: raise ValueError('''number of qubits must be > 0.''' ) if math.floor(_a ) != number_of_qubits: raise ValueError('''number of qubits must be exact integer.''' ) if number_of_qubits > 10: raise ValueError('''number of qubits too large to simulate(>10).''' ) snake_case_ : Tuple = QuantumRegister(_a , '''qr''' ) snake_case_ : Optional[Any] = ClassicalRegister(_a , '''cr''' ) snake_case_ : Any = QuantumCircuit(_a , _a ) snake_case_ : int = number_of_qubits for i in range(_a ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(_a ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , _a , _a ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(_a , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(_a , _a ) # simulate with 10000 shots snake_case_ : Any = Aer.get_backend('''qasm_simulator''' ) snake_case_ : Optional[int] = execute(_a , _a , shots=10_000 ) return job.result().get_counts(_a ) if __name__ == "__main__": print( f'Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}' )
"""simple docstring""" from __future__ import annotations import unittest from transformers import RoFormerConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerModel, ) from transformers.models.roformer.modeling_tf_roformer import ( TFRoFormerSelfAttention, TFRoFormerSinusoidalPositionalEmbedding, ) class _snake_case : def __init__( self : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : int=13 , UpperCAmelCase : Dict=7 , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : Tuple=True , UpperCAmelCase : Dict=True , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=99 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : int=2 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : Optional[Any]=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Union[str, Any]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : Union[str, Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : int=3 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : Optional[int]=None , ): __lowerCamelCase : Optional[Any] = parent __lowerCamelCase : Dict = 13 __lowerCamelCase : Dict = 7 __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : Union[str, Any] = True __lowerCamelCase : List[Any] = True __lowerCamelCase : Optional[Any] = True __lowerCamelCase : Optional[Any] = 99 __lowerCamelCase : Any = 32 __lowerCamelCase : Any = 2 __lowerCamelCase : Dict = 4 __lowerCamelCase : Optional[Any] = 37 __lowerCamelCase : List[str] = "gelu" __lowerCamelCase : str = 0.1 __lowerCamelCase : List[str] = 0.1 __lowerCamelCase : List[str] = 512 __lowerCamelCase : Optional[int] = 16 __lowerCamelCase : Dict = 2 __lowerCamelCase : Optional[int] = 0.0_2 __lowerCamelCase : str = 3 __lowerCamelCase : List[str] = 4 __lowerCamelCase : Tuple = None def lowerCamelCase__ ( self : Tuple ): __lowerCamelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCamelCase : Union[str, Any] = None if self.use_input_mask: __lowerCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCamelCase : Union[str, Any] = None if self.use_token_type_ids: __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCamelCase : Optional[int] = None __lowerCamelCase : Optional[Any] = None __lowerCamelCase : Any = None if self.use_labels: __lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) __lowerCamelCase : List[Any] = RoFormerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] ): __lowerCamelCase : int = TFRoFormerModel(config=UpperCAmelCase ) __lowerCamelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} __lowerCamelCase : List[Any] = [input_ids, input_mask] __lowerCamelCase : List[str] = model(UpperCAmelCase ) __lowerCamelCase : Optional[int] = model(UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[Any] ): __lowerCamelCase : str = True __lowerCamelCase : Tuple = TFRoFormerForCausalLM(config=UpperCAmelCase ) __lowerCamelCase : Optional[int] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCamelCase : Tuple = model(UpperCAmelCase )["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] ) def lowerCamelCase__ ( self : str , UpperCAmelCase : Any , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] ): __lowerCamelCase : Optional[Any] = TFRoFormerForMaskedLM(config=UpperCAmelCase ) __lowerCamelCase : Dict = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCamelCase : str = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] ): __lowerCamelCase : Tuple = self.num_labels __lowerCamelCase : Optional[Any] = TFRoFormerForSequenceClassification(config=UpperCAmelCase ) __lowerCamelCase : Tuple = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCamelCase : str = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] ): __lowerCamelCase : str = self.num_choices __lowerCamelCase : Dict = TFRoFormerForMultipleChoice(config=UpperCAmelCase ) __lowerCamelCase : str = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCamelCase : Union[str, Any] = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCamelCase : Union[str, Any] = tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) ) __lowerCamelCase : List[Any] = { 
"input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } __lowerCamelCase : Tuple = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Any ): __lowerCamelCase : Tuple = self.num_labels __lowerCamelCase : Optional[int] = TFRoFormerForTokenClassification(config=UpperCAmelCase ) __lowerCamelCase : List[Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCamelCase : Dict = model(UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict ): __lowerCamelCase : List[Any] = TFRoFormerForQuestionAnswering(config=UpperCAmelCase ) __lowerCamelCase : Union[str, Any] = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } __lowerCamelCase : List[str] = model(UpperCAmelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Any ): __lowerCamelCase : int = self.prepare_config_and_inputs() ( ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ( __lowerCamelCase ) , ) : Union[str, Any] = config_and_inputs __lowerCamelCase : List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class _snake_case ( a__ , a__ , unittest.TestCase ): snake_case__ = ( ( TFRoFormerModel, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerForMultipleChoice, ) if is_tf_available() else () ) snake_case__ = ( { "feature-extraction": TFRoFormerModel, "fill-mask": TFRoFormerForMaskedLM, "question-answering": TFRoFormerForQuestionAnswering, "text-classification": TFRoFormerForSequenceClassification, "text-generation": TFRoFormerForCausalLM, "token-classification": TFRoFormerForTokenClassification, "zero-shot": TFRoFormerForSequenceClassification, } if is_tf_available() else {} ) snake_case__ = False snake_case__ = False def lowerCamelCase__ ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int ): if pipeline_test_casse_name == "TextGenerationPipelineTests": return True return False def lowerCamelCase__ ( self : List[Any] ): __lowerCamelCase : Optional[int] = TFRoFormerModelTester(self ) __lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 ) def lowerCamelCase__ ( self : str ): self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCAmelCase ) def lowerCamelCase__ ( self : Tuple ): __lowerCamelCase : 
List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase ) def lowerCamelCase__ ( self : List[Any] ): __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*UpperCAmelCase ) def lowerCamelCase__ ( self : Any ): __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): __lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase ) def lowerCamelCase__ ( self : Optional[int] ): __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase ) def lowerCamelCase__ ( self : Tuple ): __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase ) @slow def lowerCamelCase__ ( self : Tuple ): __lowerCamelCase : Dict = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base" ) self.assertIsNotNone(UpperCAmelCase ) @require_tf class _snake_case ( unittest.TestCase ): @slow def lowerCamelCase__ ( self : List[Any] ): __lowerCamelCase : Optional[Any] = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" ) __lowerCamelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowerCamelCase : str = model(UpperCAmelCase )[0] # TODO Replace vocab size __lowerCamelCase : int = 50000 __lowerCamelCase : List[Any] = [1, 6, vocab_size] self.assertEqual(output.shape , UpperCAmelCase ) print(output[:, :3, :3] ) # TODO Replace values below with what was printed above. 
__lowerCamelCase : Optional[int] = tf.constant( [ [ [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6], [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7], [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1E-4 ) @require_tf class _snake_case ( unittest.TestCase ): snake_case__ = 1E-4 def lowerCamelCase__ ( self : List[Any] ): __lowerCamelCase : Dict = tf.constant([[4, 10]] ) __lowerCamelCase : Any = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 ) __lowerCamelCase : Tuple = emba(input_ids.shape ) __lowerCamelCase : List[str] = tf.constant( [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] ) tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance ) def lowerCamelCase__ ( self : Optional[int] ): __lowerCamelCase : Tuple = tf.constant( [ [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0], [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7], [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0], ] ) __lowerCamelCase : Union[str, Any] = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 ) emba([2, 16, 512] ) __lowerCamelCase : Tuple = emba.weight[:3, :5] tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance ) @require_tf class _snake_case ( unittest.TestCase ): snake_case__ = 1E-4 def lowerCamelCase__ ( self : List[str] ): # 2,12,16,64 __lowerCamelCase : Optional[int] = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __lowerCamelCase : Optional[Any] = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100 __lowerCamelCase : str = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 ) __lowerCamelCase : Union[str, Any] = embed_positions([2, 16, 768] )[None, None, :, :] __lowerCamelCase , __lowerCamelCase : Optional[Any] = TFRoFormerSelfAttention.apply_rotary_position_embeddings( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) __lowerCamelCase : Tuple = tf.constant( [ [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0], [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3], [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5], [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1], [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0], [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3], ] ) __lowerCamelCase : List[Any] = tf.constant( [ [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0], [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3], [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5], [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1], [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0], [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3], ] ) tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance ) tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
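# The last test above exercises TFRoFormerSelfAttention.apply_rotary_position_embeddings.
# For intuition, a hedged NumPy sketch of generic rotary position embeddings
# (split-half variant; NOT the exact TFRoFormer kernel): each channel pair is
# rotated by a position-dependent angle theta_i = pos / 10000**(i / half).
import numpy as np


def apply_rope_sketch(x: np.ndarray, positions: np.ndarray) -> np.ndarray:
    half = x.shape[-1] // 2
    freqs = positions[:, None] / (10_000 ** (np.arange(half) / half))
    cos, sin = np.cos(freqs), np.sin(freqs)
    x1, x2 = x[..., :half], x[..., half:]
    # Standard 2D rotation applied pairwise across the feature dimension.
    return np.concatenate([x1 * cos - x2 * sin, x1 * sin + x2 * cos], axis=-1)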
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __A = {'''configuration_mra''': ['''MRA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MraConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ '''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MraForMaskedLM''', '''MraForMultipleChoice''', '''MraForQuestionAnswering''', '''MraForSequenceClassification''', '''MraForTokenClassification''', '''MraLayer''', '''MraModel''', '''MraPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
def solution(n: int = 4_000_000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)


if __name__ == "__main__":
    print(f"{solution() = }")
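# An alternative sketch resting on a standard identity (not part of the file
# above): every third Fibonacci number is even, and the even terms satisfy
# E(k) = 4*E(k-1) + E(k-2) with E(1) = 2, E(2) = 8, so the modulo test and the
# intermediate list can both be skipped.
def solution_even_recurrence(n: int = 4_000_000) -> int:
    total, prev, curr = 0, 2, 8
    while prev <= n:
        total += prev
        prev, curr = curr, 4 * curr + prev
    return total


assert solution_even_recurrence() == solution() == 4_613_732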
import argparse from tax import checkpoints from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : str ) -> str: UpperCAmelCase_ = AutoConfig.from_pretrained(__UpperCamelCase ) UpperCAmelCase_ = FlaxAutoModelForSeqaSeqLM.from_config(config=__UpperCamelCase ) UpperCAmelCase_ = checkpoints.load_tax_checkpoint(__UpperCamelCase ) UpperCAmelCase_ = '''wi_0''' in tax_model['''target''']['''encoder''']['''layers_0''']['''mlp'''] if config.model_type == "t5": UpperCAmelCase_ = '''SelfAttention''' if config.model_type == "longt5" and config.encoder_attention_type == "local": UpperCAmelCase_ = '''LocalSelfAttention''' elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase_ = '''TransientGlobalSelfAttention''' else: raise ValueError( '''Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`''' ''' attribute with a value from [\'local\', \'transient-global].''' ) # Encoder for layer_index in range(config.num_layers ): UpperCAmelCase_ = f'layers_{str(__UpperCamelCase )}' # Self-Attention UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''key''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''out''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''query''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''value''']['''kernel'''] # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''attention''']['''T5LayerNorm_0''']['''scale'''] # Layer Normalization UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_attention_layer_norm''']['''scale'''] if split_mlp_wi: UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization UpperCAmelCase_ = tax_model['''target''']['''encoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning UpperCAmelCase_ = flax_model.params['''encoder''']['''block'''][str(__UpperCamelCase )]['''layer'''] UpperCAmelCase_ = tax_attention_key UpperCAmelCase_ = tax_attention_out UpperCAmelCase_ = tax_attention_query UpperCAmelCase_ = tax_attention_value UpperCAmelCase_ = tax_attention_layer_norm # Global input layer norm if config.model_type == "longt5" and config.encoder_attention_type == "transient-global": UpperCAmelCase_ = tax_global_layer_norm if split_mlp_wi: UpperCAmelCase_ = tax_mlp_wi_a UpperCAmelCase_ = tax_mlp_wi_a else: UpperCAmelCase_ = tax_mlp_wi UpperCAmelCase_ = tax_mlp_wo UpperCAmelCase_ = tax_mlp_layer_norm UpperCAmelCase_ = flax_model_encoder_layer_block # Only for layer 0: UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''relpos_bias''']['''rel_embedding'''].T UpperCAmelCase_ = tax_encoder_rel_embedding # Side/global relative position_bias + layer norm if config.model_type == "longt5" and 
config.encoder_attention_type == "transient-global": UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''side_relpos_bias''']['''rel_embedding'''].T UpperCAmelCase_ = tax_encoder_global_rel_embedding # Assigning UpperCAmelCase_ = tax_model['''target''']['''encoder''']['''encoder_norm''']['''scale'''] UpperCAmelCase_ = tax_encoder_norm # Decoder for layer_index in range(config.num_layers ): UpperCAmelCase_ = f'layers_{str(__UpperCamelCase )}' # Self-Attention UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''key''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''out''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''query''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''self_attention''']['''value''']['''kernel'''] # Layer Normalization UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_self_attention_layer_norm'''][ '''scale''' ] # Encoder-Decoder-Attention UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''encoder_decoder_attention'''] UpperCAmelCase_ = tax_enc_dec_attention_module['''key''']['''kernel'''] UpperCAmelCase_ = tax_enc_dec_attention_module['''out''']['''kernel'''] UpperCAmelCase_ = tax_enc_dec_attention_module['''query''']['''kernel'''] UpperCAmelCase_ = tax_enc_dec_attention_module['''value''']['''kernel'''] # Layer Normalization UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_cross_attention_layer_norm''']['''scale'''] # MLP if split_mlp_wi: UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_0''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi_1''']['''kernel'''] else: UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wi''']['''kernel'''] UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''mlp''']['''wo''']['''kernel'''] # Layer Normalization UpperCAmelCase_ = tax_model['''target''']['''decoder'''][layer_name]['''pre_mlp_layer_norm''']['''scale'''] # Assigning UpperCAmelCase_ = flax_model.params['''decoder''']['''block'''][str(__UpperCamelCase )]['''layer'''] UpperCAmelCase_ = tax_attention_key UpperCAmelCase_ = tax_attention_out UpperCAmelCase_ = tax_attention_query UpperCAmelCase_ = tax_attention_value UpperCAmelCase_ = tax_pre_attention_layer_norm UpperCAmelCase_ = tax_enc_dec_attention_key UpperCAmelCase_ = tax_enc_dec_attention_out UpperCAmelCase_ = tax_enc_dec_attention_query UpperCAmelCase_ = tax_enc_dec_attention_value UpperCAmelCase_ = tax_cross_layer_norm if split_mlp_wi: UpperCAmelCase_ = tax_mlp_wi_a UpperCAmelCase_ = tax_mlp_wi_a else: UpperCAmelCase_ = tax_mlp_wi UpperCAmelCase_ = tax_mlp_wo UpperCAmelCase_ = txa_mlp_layer_norm UpperCAmelCase_ = flax_model_decoder_layer_block # Decoder Normalization UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''decoder_norm''']['''scale'''] UpperCAmelCase_ = txa_decoder_norm # Only for layer 0: UpperCAmelCase_ = tax_model['''target''']['''decoder''']['''relpos_bias''']['''rel_embedding'''].T UpperCAmelCase_ = tax_decoder_rel_embedding # Token Embeddings UpperCAmelCase_ = tax_model['''target''']['''token_embedder''']['''embedding'''] UpperCAmelCase_ = txa_token_embeddings # LM Head (only in v1.1 and LongT5 checkpoints) if "logits_dense" in tax_model["target"]["decoder"]: UpperCAmelCase_ = 
tax_model['''target''']['''decoder''']['''logits_dense''']['''kernel'''] flax_model.save_pretrained(__UpperCamelCase ) print('''T5X Model was successfully converted!''' ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.') parser.add_argument( '--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.' ) args = parser.parse_args() convert_tax_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
177
0
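A minimal sketch of the core move in the converter above: kernels are copied from the nested T5X checkpoint dict into the Flax parameter tree, transposing the relative-position embedding on the way. The dict layout and shapes below are illustrative stand-ins, not the real checkpoint structure.

import numpy as np

# T5X stores rel_embedding roughly as (heads, buckets); the Flax side expects
# (buckets, heads), which is why the script applies `.T` before assigning.
tax_side = {"relpos_bias": {"rel_embedding": np.zeros((12, 32))}}
flax_side = {}
flax_side["embedding"] = tax_side["relpos_bias"]["rel_embedding"].T
print(flax_side["embedding"].shape)  # (32, 12)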
def solution ( _UpperCAmelCase = 10_00 ): return sum(e for e in range(3 , _UpperCAmelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(f'''{solution() = }''')
13
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class __lowercase ( unittest.TestCase ): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int): SCREAMING_SNAKE_CASE_: str = jnp.ones((batch_size, length)) / length return scores def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: Dict = None SCREAMING_SNAKE_CASE_: str = 20 SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__) # tweak scores to not be uniform anymore SCREAMING_SNAKE_CASE_: List[str] = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch SCREAMING_SNAKE_CASE_: Any = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch # compute softmax SCREAMING_SNAKE_CASE_: Dict = jax.nn.softmax(lowerCAmelCase__ , axis=-1) SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5) SCREAMING_SNAKE_CASE_: List[str] = FlaxTemperatureLogitsWarper(temperature=1.3) SCREAMING_SNAKE_CASE_: str = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1) SCREAMING_SNAKE_CASE_: int = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3)) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3)) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max()) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min()) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max()) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min()) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: List[str] = None SCREAMING_SNAKE_CASE_: str = 10 SCREAMING_SNAKE_CASE_: Tuple = 2 # create ramp distribution SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3) SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False]) self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True]) # check special case SCREAMING_SNAKE_CASE_: Any = 5 SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3) SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy() SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 
0.0).sum(axis=-1).tolist() , [2, 2]) def _SCREAMING_SNAKE_CASE ( self : int): SCREAMING_SNAKE_CASE_: Tuple = None SCREAMING_SNAKE_CASE_: Dict = 10 SCREAMING_SNAKE_CASE_: Dict = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]])) SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8) SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]]) self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3)) # check edge cases with negative and extreme logits SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0) SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2]) def _SCREAMING_SNAKE_CASE ( self : Tuple): SCREAMING_SNAKE_CASE_: Tuple = 20 SCREAMING_SNAKE_CASE_: List[str] = 4 SCREAMING_SNAKE_CASE_: Optional[int] = 0 SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__) # check that min length is applied at length 5 SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20) SCREAMING_SNAKE_CASE_: int = 5 SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")]) # check that min length is not applied anymore at length 15 SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = 15 SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) self.assertFalse(jnp.isinf(lowerCAmelCase__).any()) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]): SCREAMING_SNAKE_CASE_: int = 20 SCREAMING_SNAKE_CASE_: str = 4 SCREAMING_SNAKE_CASE_: List[Any] = 0 SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__) # check that all scores are -inf except the bos_token_id score SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20) SCREAMING_SNAKE_CASE_: List[str] = 1 SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all()) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0]) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 SCREAMING_SNAKE_CASE_: List[Any] = 3 SCREAMING_SNAKE_CASE_: 
Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) self.assertFalse(jnp.isinf(lowerCAmelCase__).any()) def _SCREAMING_SNAKE_CASE ( self : List[str]): SCREAMING_SNAKE_CASE_: Any = 20 SCREAMING_SNAKE_CASE_: Optional[Any] = 4 SCREAMING_SNAKE_CASE_: Dict = 0 SCREAMING_SNAKE_CASE_: List[Any] = 5 SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__) # check that all scores are -inf except the eos_token_id when max_length is reached SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20) SCREAMING_SNAKE_CASE_: Optional[int] = 4 SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all()) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached SCREAMING_SNAKE_CASE_: List[str] = 3 SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) self.assertFalse(jnp.isinf(lowerCAmelCase__).any()) def _SCREAMING_SNAKE_CASE ( self : Optional[int]): SCREAMING_SNAKE_CASE_: int = 4 SCREAMING_SNAKE_CASE_: List[Any] = 10 SCREAMING_SNAKE_CASE_: int = 15 SCREAMING_SNAKE_CASE_: Dict = 2 SCREAMING_SNAKE_CASE_: int = 1 SCREAMING_SNAKE_CASE_: List[Any] = 15 # dummy input_ids and scores SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = input_ids.copy() SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy() # instantiate all dist processors SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5) SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3) SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = 10 # no processor list SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) # with processor list SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, 
eos_dist_proc]) SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist()) def _SCREAMING_SNAKE_CASE ( self : Any): SCREAMING_SNAKE_CASE_: Optional[int] = 4 SCREAMING_SNAKE_CASE_: int = 10 SCREAMING_SNAKE_CASE_: List[str] = 15 SCREAMING_SNAKE_CASE_: List[Any] = 2 SCREAMING_SNAKE_CASE_: Union[str, Any] = 1 SCREAMING_SNAKE_CASE_: str = 15 # dummy input_ids and scores SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy() SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = scores.copy() # instantiate all dist processors SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5) SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3) SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8) # instantiate all logits processors SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: str = 10 # no processor list def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict): SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) return scores # with processor list def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]): SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]) SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__) return scores SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__) SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__) # scores should be equal self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3)) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
13
1
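What the temperature-warper assertions above boil down to: dividing logits by a temperature below 1 sharpens the softmax distribution, above 1 flattens it. A jax-free sketch with plain numpy:

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.5])
sharp = softmax(logits / 0.5)   # temperature < 1: the peak gets higher
smooth = softmax(logits / 1.3)  # temperature > 1: the peak gets lower
assert sharp.max() > softmax(logits).max() > smooth.max()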
from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class UpperCamelCase ( lowercase__ ): '''simple docstring''' def __init__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , **UpperCamelCase_ , ): super().__init__( UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , ) lowercase_ :Union[str, Any] = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths} lowercase_ :List[str] = Text( cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , **UpperCamelCase_ , ) def UpperCamelCase ( self ): # Build iterable dataset if self.streaming: lowercase_ :Union[str, Any] = self.builder.as_streaming_dataset(split=self.split ) # Build regular (map-style) dataset else: lowercase_ :str = None lowercase_ :Optional[Any] = None lowercase_ :List[str] = None lowercase_ :Union[str, Any] = None self.builder.download_and_prepare( download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , ) lowercase_ :Optional[int] = self.builder.as_dataset( split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory ) return dataset
252
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Any = { "edbeeching/decision-transformer-gym-hopper-medium": ( "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json" ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class UpperCamelCase ( lowercase__ ): '''simple docstring''' lowercase : Optional[int] ="""decision_transformer""" lowercase : Dict =["""past_key_values"""] lowercase : Any ={ """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self , UpperCamelCase_=17 , UpperCamelCase_=4 , UpperCamelCase_=128 , UpperCamelCase_=4096 , UpperCamelCase_=True , UpperCamelCase_=1 , UpperCamelCase_=1024 , UpperCamelCase_=3 , UpperCamelCase_=1 , UpperCamelCase_=None , UpperCamelCase_="relu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.02 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=5_0256 , UpperCamelCase_=5_0256 , UpperCamelCase_=False , UpperCamelCase_=False , **UpperCamelCase_ , ): lowercase_ :Any = state_dim lowercase_ :List[str] = act_dim lowercase_ :List[str] = hidden_size lowercase_ :int = max_ep_len lowercase_ :List[str] = action_tanh lowercase_ :Any = vocab_size lowercase_ :List[Any] = n_positions lowercase_ :List[str] = n_layer lowercase_ :Optional[Any] = n_head lowercase_ :int = n_inner lowercase_ :List[str] = activation_function lowercase_ :List[str] = resid_pdrop lowercase_ :Dict = embd_pdrop lowercase_ :List[Any] = attn_pdrop lowercase_ :Union[str, Any] = layer_norm_epsilon lowercase_ :List[str] = initializer_range lowercase_ :Any = scale_attn_weights lowercase_ :Union[str, Any] = use_cache lowercase_ :Any = scale_attn_by_inverse_layer_idx lowercase_ :Tuple = reorder_and_upcast_attn lowercase_ :int = bos_token_id lowercase_ :List[str] = eos_token_id super().__init__(bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ )
252
1
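The read() method of the Text reader above follows a common datasets pattern: return a lazily-evaluated iterable when streaming is requested, otherwise prepare the cache and materialize a map-style dataset. A hedged sketch of that control flow (`builder` here is a hypothetical stand-in for the packaged Text builder):

def read(builder, split, streaming=False, keep_in_memory=False):
    # Streaming: no download step, rows are yielded on the fly.
    if streaming:
        return builder.as_streaming_dataset(split=split)
    # Map-style: materialize to the cache first, then load it back.
    builder.download_and_prepare()
    return builder.as_dataset(split=split, in_memory=keep_in_memory)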
"""simple docstring""" from __future__ import annotations __UpperCamelCase : Dict = 1.6021e-19 # units = C def __SCREAMING_SNAKE_CASE ( A_ , A_ , A_ , ): if (conductivity, electron_conc, mobility).count(0 ) != 1: raise ValueError('''You cannot supply more or less than 2 values''' ) elif conductivity < 0: raise ValueError('''Conductivity cannot be negative''' ) elif electron_conc < 0: raise ValueError('''Electron concentration cannot be negative''' ) elif mobility < 0: raise ValueError('''mobility cannot be negative''' ) elif conductivity == 0: return ( "conductivity", mobility * electron_conc * ELECTRON_CHARGE, ) elif electron_conc == 0: return ( "electron_conc", conductivity / (mobility * ELECTRON_CHARGE), ) else: return ( "mobility", conductivity / (electron_conc * ELECTRON_CHARGE), ) if __name__ == "__main__": import doctest doctest.testmod()
106
import os from math import log10 def solution ( data_file = "base_exp.txt" ): largest : float = 0 result : int = 0 for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ), data_file ) ) ): a , x = list(map(int, line.split("," ) ) ) if x * log10(a ) > largest: largest = x * log10(a ) result = i + 1 return result if __name__ == "__main__": print(solution())
236
0
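The carrier-concentration function above just rearranges sigma = q * n * mu; whichever of the three quantities is passed as 0 is the one solved for. One branch, worked as a standalone sketch (the input values are made up):

ELECTRON_CHARGE = 1.6021e-19  # coulombs

def mobility_from(conductivity, electron_conc):
    # mu = sigma / (n * q)
    return conductivity / (electron_conc * ELECTRON_CHARGE)

print(mobility_from(0.02, 1e21))  # ~1.25e-04 with these made-up SI inputs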
from ....utils import logging A : List[str] = logging.get_logger(__name__) class lowerCamelCase (SCREAMING_SNAKE_CASE__ ): """simple docstring""" def __init__( self : List[str] , __magic_name__ : Optional[Any] , __magic_name__ : Any=None , __magic_name__ : List[str]=2_048 ) -> List[Any]: SCREAMING_SNAKE_CASE_ = config.__dict__ SCREAMING_SNAKE_CASE_ = modal_hidden_size if num_labels: SCREAMING_SNAKE_CASE_ = num_labels
305
from __future__ import annotations import numpy as np def lower_upper_decomposition ( table ): rows , columns = np.shape(table ) if rows != columns: msg = ( "'table' has to be of square shaped array but got a " F'''{rows}x{columns} array:\n{table}''' ) raise ValueError(msg ) lower = np.zeros((rows, columns) ) upper = np.zeros((rows, columns) ) for i in range(columns ): for j in range(i ): total = sum(lower[i][k] * upper[k][j] for k in range(j ) ) if upper[j][j] == 0: raise ArithmeticError("No LU decomposition exists" ) lower[i][j] = (table[i][j] - total) / upper[j][j] lower[i][i] = 1 for j in range(i , columns ): total = sum(lower[i][k] * upper[k][j] for k in range(i ) ) upper[i][j] = table[i][j] - total return lower, upper if __name__ == "__main__": import doctest doctest.testmod()
305
1
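A quick hand-check of the Doolittle factorization the LU routine above produces (unit diagonal on the lower factor): for A = [[4, 3], [6, 3]], L = [[1, 0], [1.5, 1]] and U = [[4, 3], [0, -1.5]] multiply back to A.

import numpy as np

L = np.array([[1.0, 0.0], [1.5, 1.0]])
U = np.array([[4.0, 3.0], [0.0, -1.5]])
assert np.allclose(L @ U, np.array([[4.0, 3.0], [6.0, 3.0]]))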
import warnings from .generation import TFGenerationMixin class A( _lowerCAmelCase ): '''simple docstring''' warnings.warn( '''Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will ''' '''be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.''' , _lowerCAmelCase , )
204
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class snake_case__ ( unittest.TestCase ): def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = 32 , lowerCAmelCase__ = True , lowerCAmelCase__ = 1 / 2_55 , lowerCAmelCase__ = True , lowerCAmelCase__ = True , lowerCAmelCase__ = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , lowerCAmelCase__ = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , lowerCAmelCase__ = True , lowerCAmelCase__=7 , lowerCAmelCase__=30 , lowerCAmelCase__=4_00 , lowerCAmelCase__=3 , ) -> Union[str, Any]: __magic_name__ : str = parent __magic_name__ : Dict = do_resize __magic_name__ : Union[str, Any] = size if size is not None else {"""shortest_edge""": 2_88} __magic_name__ : Union[str, Any] = size_divisor __magic_name__ : Union[str, Any] = do_rescale __magic_name__ : Dict = rescale_factor __magic_name__ : Union[str, Any] = do_normalize __magic_name__ : List[str] = do_center_crop __magic_name__ : Tuple = image_mean __magic_name__ : Tuple = image_std __magic_name__ : Tuple = do_pad __magic_name__ : int = batch_size __magic_name__ : List[Any] = num_channels __magic_name__ : int = min_resolution __magic_name__ : str = max_resolution def __magic_name__ ( self ) -> str: return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int: if not batched: __magic_name__ : Dict = self.size["""shortest_edge"""] __magic_name__ : List[str] = image_inputs[0] if isinstance(lowerCAmelCase__ , Image.Image ): __magic_name__ ,__magic_name__ : List[Any] = image.size else: __magic_name__ ,__magic_name__ : Dict = image.shape[1], image.shape[2] __magic_name__ : List[Any] = size / min(lowerCAmelCase__ , lowerCAmelCase__ ) if h < w: __magic_name__ ,__magic_name__ : str = size, scale * w else: __magic_name__ ,__magic_name__ : Optional[Any] = scale * h, size __magic_name__ : Tuple = int((13_33 / 8_00) * size ) if max(lowerCAmelCase__ , lowerCAmelCase__ ) > max_size: __magic_name__ : Union[str, Any] = max_size / max(lowerCAmelCase__ , lowerCAmelCase__ ) __magic_name__ : Union[str, Any] = newh * scale __magic_name__ : Any = neww * scale __magic_name__ ,__magic_name__ : str = int(newh + 0.5 ), int(neww + 0.5 ) __magic_name__ ,__magic_name__ : int = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __magic_name__ : Union[str, Any] = [] for image in image_inputs: __magic_name__ ,__magic_name__ : int = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __magic_name__ : Optional[Any] = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[0] )[0] __magic_name__ : Tuple = max(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case__ ( _lowerCAmelCase , unittest.TestCase ): lowercase__ : int = BridgeTowerImageProcessor if is_vision_available() 
else None def __magic_name__ ( self ) -> Optional[Any]: __magic_name__ : Any = BridgeTowerImageProcessingTester(self ) @property def __magic_name__ ( self ) -> List[Any]: return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__ ( self ) -> Any: __magic_name__ : str = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowerCAmelCase__ , """image_mean""" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , """image_std""" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , """do_normalize""" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , """size""" ) ) self.assertTrue(hasattr(lowerCAmelCase__ , """size_divisor""" ) ) def __magic_name__ ( self ) -> Optional[int]: pass def __magic_name__ ( self ) -> Tuple: # Initialize image processor __magic_name__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , Image.Image ) # Test not batched input __magic_name__ : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __magic_name__ ,__magic_name__ : str = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values __magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __magic_name__ ( self ) -> Tuple: # Initialize image processor __magic_name__ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , numpify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , np.ndarray ) # Test not batched input __magic_name__ : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __magic_name__ ,__magic_name__ : List[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ : Any = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values __magic_name__ ,__magic_name__ : Optional[int] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __magic_name__ ( self ) -> str: # Initialize image processor __magic_name__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase__ , torchify=lowerCAmelCase__ ) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__ , torch.Tensor ) # Test not batched input __magic_name__ : Dict = 
image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __magic_name__ ,__magic_name__ : Any = self.image_processor_tester.get_expected_values(lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ : Dict = image_processing(lowerCAmelCase__ , return_tensors="""pt""" ).pixel_values __magic_name__ ,__magic_name__ : List[str] = self.image_processor_tester.get_expected_values(lowerCAmelCase__ , batched=lowerCAmelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
342
0
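The expected-size helper in the image-processor test above does shortest-edge resizing with a long-side cap and divisor rounding. A self-contained sketch of that math (the constants mirror the test defaults; exact rounding in the library may differ slightly):

def expected_hw(h, w, size=288, size_divisor=32):
    # Scale so the short side hits `size`.
    scale = size / min(h, w)
    newh, neww = h * scale, w * scale
    # Cap the long side at (1333 / 800) * size, rescaling if needed.
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        s = max_size / max(newh, neww)
        newh, neww = newh * s, neww * s
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    # Floor both sides to a multiple of `size_divisor`.
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_hw(400, 600))  # (288, 416)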
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __A = logging.get_logger(__name__) __A = { "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json", "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json", # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl } class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = '''xlm-roberta-xl''' def __init__( self , lowerCamelCase__=250_880 , lowerCamelCase__=2_560 , lowerCamelCase__=36 , lowerCamelCase__=32 , lowerCamelCase__=10_240 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=514 , lowerCamelCase__=1 , lowerCamelCase__=0.02 , lowerCamelCase__=1e-05 , lowerCamelCase__=1 , lowerCamelCase__=0 , lowerCamelCase__=2 , lowerCamelCase__="absolute" , lowerCamelCase__=True , lowerCamelCase__=None , **lowerCamelCase__ , ) -> List[str]: '''simple docstring''' super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ ) __lowerCamelCase = vocab_size __lowerCamelCase = hidden_size __lowerCamelCase = num_hidden_layers __lowerCamelCase = num_attention_heads __lowerCamelCase = hidden_act __lowerCamelCase = intermediate_size __lowerCamelCase = hidden_dropout_prob __lowerCamelCase = attention_probs_dropout_prob __lowerCamelCase = max_position_embeddings __lowerCamelCase = type_vocab_size __lowerCamelCase = initializer_range __lowerCamelCase = layer_norm_eps __lowerCamelCase = position_embedding_type __lowerCamelCase = use_cache __lowerCamelCase = classifier_dropout class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" @property def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __lowerCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __lowerCamelCase = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
348
from __future__ import annotations def median_of_two_arrays ( numsa : list[float] , numsb : list[float] ) -> float: """simple docstring""" all_numbers = sorted(numsa + numsb ) div , mod = divmod(len(all_numbers ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() array_a = [float(x) for x in input("Enter the elements of first array: ").split()] array_b = [float(x) for x in input("Enter the elements of second array: ").split()] print(f'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''')
348
1
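The merged-and-sorted median above agrees with the stdlib on the concatenated input; a quick cross-check:

from statistics import median

assert median([1.0, 3.0] + [2.0]) == 2.0
assert median([1.0, 2.0] + [3.0, 4.0]) == 2.5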
import os from pathlib import Path def load_cuda_kernels ( ): '''simple docstring''' from torch.utils.cpp_extension import load root = Path(__file__ ).resolve().parent.parent.parent / 'kernels' / 'deformable_detr' src_files = [ root / filename for filename in [ 'vision.cpp', os.path.join('cpu', 'ms_deform_attn_cpu.cpp' ), os.path.join('cuda', 'ms_deform_attn_cuda.cu' ), ] ] load( 'MultiScaleDeformableAttention', src_files, with_cuda=True, extra_include_paths=[str(root )], extra_cflags=['-DWITH_CUDA=1'], extra_cuda_cflags=[ '-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__', ], ) import MultiScaleDeformableAttention as MSDA return MSDA
138
from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Dict = logging.get_logger(__name__) __A : List[Any] = {'''ctrl''': '''https://huggingface.co/ctrl/resolve/main/config.json'''} class __A ( lowerCAmelCase ): lowerCAmelCase_ : str = "ctrl" lowerCAmelCase_ : Optional[Any] = ["past_key_values"] lowerCAmelCase_ : Dict = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self : Any , UpperCAmelCase_ : int=246534 , UpperCAmelCase_ : Optional[Any]=256 , UpperCAmelCase_ : Any=1280 , UpperCAmelCase_ : int=8192 , UpperCAmelCase_ : int=48 , UpperCAmelCase_ : Optional[Any]=16 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=1E-6 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=True , **UpperCAmelCase_ : int , ): lowerCAmelCase : int = vocab_size lowerCAmelCase : int = n_positions lowerCAmelCase : Optional[Any] = n_embd lowerCAmelCase : Optional[Any] = n_layer lowerCAmelCase : List[str] = n_head lowerCAmelCase : Union[str, Any] = dff lowerCAmelCase : Dict = resid_pdrop lowerCAmelCase : List[Any] = embd_pdrop lowerCAmelCase : List[Any] = layer_norm_epsilon lowerCAmelCase : Dict = initializer_range lowerCAmelCase : Union[str, Any] = use_cache super().__init__(**UpperCAmelCase_ )
138
1
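The kernel loader above relies on torch's JIT C++ extension builder. A minimal sketch of that pattern with a hypothetical single-source extension (the module name, source file, and flags are placeholders):

from pathlib import Path
from torch.utils.cpp_extension import load

root = Path(__file__).resolve().parent
my_ext = load(
    name="my_ext",                       # hypothetical module name
    sources=[str(root / "my_ext.cpp")],  # hypothetical C++ source
    extra_cflags=["-O3"],
    verbose=True,
)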
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __UpperCAmelCase :Optional[int] = [ # tf -> hf ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("beta", "bias"), ("gamma", "weight"), ("pegasus", "model"), ] __UpperCAmelCase :Union[str, Any] = [ (".output.dense", ".fc2"), ("intermediate.LayerNorm", "final_layer_norm"), ("intermediate.dense", "fc1"), ] __UpperCAmelCase :Dict = ( INIT_COMMON + [ ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.out_proj"), ("attention.self", "self_attn"), ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"), ("attention.encdec_output.dense", "encoder_attn.out_proj"), ("attention.encdec", "encoder_attn"), ("key", "k_proj"), ("value", "v_proj"), ("query", "q_proj"), ("decoder.LayerNorm", "decoder.layernorm_embedding"), ] + END_COMMON ) __UpperCAmelCase :Tuple = ( INIT_COMMON + [ ("embeddings.word_embeddings", "shared.weight"), ("embeddings.position_embeddings", "embed_positions.weight"), ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.output"), ("attention.self", "self_attn.self"), ("encoder.LayerNorm", "encoder.layernorm_embedding"), ] + END_COMMON ) __UpperCAmelCase :Any = [ "encdec/key/bias", "encdec/query/bias", "encdec/value/bias", "self/key/bias", "self/query/bias", "self/value/bias", "encdec_output/dense/bias", "attention/output/dense/bias", ] def _a ( _lowercase : Optional[Any] , _lowercase : Any ): '''simple docstring''' for tf_name, hf_name in patterns: __UpperCAmelCase : Any = k.replace(_lowercase , _lowercase ) return k def _a ( _lowercase : dict , _lowercase : dict ): '''simple docstring''' __UpperCAmelCase : str = BigBirdPegasusConfig(**_lowercase ) __UpperCAmelCase : Optional[int] = BigBirdPegasusForConditionalGeneration(_lowercase ) __UpperCAmelCase : List[Any] = torch_model.state_dict() __UpperCAmelCase : List[str] = {} # separating decoder weights __UpperCAmelCase : Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith('''pegasus/decoder''' )} __UpperCAmelCase : Optional[Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith('''pegasus/decoder''' )} for k, v in tqdm(decoder_weights.items() , '''tf -> hf conversion''' ): __UpperCAmelCase : Dict = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE] if any(_lowercase ): continue __UpperCAmelCase : List[str] = DECODER_PATTERNS __UpperCAmelCase : List[Any] = rename_state_dict_key(_lowercase , _lowercase ) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __UpperCAmelCase : Dict = v.T __UpperCAmelCase : Optional[int] = torch.from_numpy(_lowercase ) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , '''tf -> hf conversion''' ): __UpperCAmelCase : Union[str, Any] = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE] if any(_lowercase ): continue __UpperCAmelCase : int = REMAINING_PATTERNS __UpperCAmelCase : Optional[int] = rename_state_dict_key(_lowercase , _lowercase ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})' ) if any(True if i in k else False for i in ['''dense''', '''query''', '''key''', '''value'''] ): __UpperCAmelCase : Dict = v.T __UpperCAmelCase : Optional[Any] = torch.from_numpy(_lowercase ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' __UpperCAmelCase : Tuple = mapping['''model.embed_positions.weight'''] __UpperCAmelCase : List[str] = mapping.pop('''model.embed_positions.weight''' ) __UpperCAmelCase : Tuple = torch_model.load_state_dict(_lowercase , strict=_lowercase ) __UpperCAmelCase : Optional[int] = [ k for k in missing if k not in [ '''final_logits_bias''', '''model.encoder.embed_tokens.weight''', '''model.decoder.embed_tokens.weight''', '''lm_head.weight''', ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def _a ( _lowercase : List[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = tf.train.list_variables(_lowercase ) __UpperCAmelCase : str = {} __UpperCAmelCase : Union[str, Any] = ['''global_step'''] for name, shape in tqdm(_lowercase , desc='''converting tf checkpoint to dict''' ): __UpperCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name ) if skip_key: continue __UpperCAmelCase : str = tf.train.load_variable(_lowercase , _lowercase ) __UpperCAmelCase : Optional[int] = array return tf_weights def _a ( _lowercase : str , _lowercase : str , _lowercase : dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = get_tf_weights_as_numpy(_lowercase ) __UpperCAmelCase : List[Any] = convert_bigbird_pegasus(_lowercase , _lowercase ) torch_model.save_pretrained(_lowercase ) if __name__ == "__main__": __UpperCAmelCase :Union[str, Any] = argparse.ArgumentParser() parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables") parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.") __UpperCAmelCase :str = parser.parse_args() __UpperCAmelCase :str = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
361
'''simple docstring''' from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class a ( _a ): """simple docstring""" SCREAMING_SNAKE_CASE : torch.FloatTensor class a ( _a , _a ): """simple docstring""" @register_to_config def __init__( self : str , snake_case : int = 32 , snake_case : int = 64 , snake_case : int = 20 , snake_case : int = 768 , snake_case : Tuple=77 , snake_case : List[Any]=4 , snake_case : float = 0.0 , snake_case : str = "silu" , snake_case : Optional[str] = None , snake_case : Optional[str] = None , snake_case : Optional[str] = "linear" , snake_case : Optional[str] = "prd" , snake_case : Optional[int] = None , snake_case : Optional[int] = None , snake_case : Optional[int] = None , ) -> List[str]: super().__init__() __UpperCAmelCase : List[Any] = num_attention_heads __UpperCAmelCase : List[str] = attention_head_dim __UpperCAmelCase : int = num_attention_heads * attention_head_dim __UpperCAmelCase : List[Any] = additional_embeddings __UpperCAmelCase : Any = time_embed_dim or inner_dim __UpperCAmelCase : Any = embedding_proj_dim or embedding_dim __UpperCAmelCase : Union[str, Any] = clip_embed_dim or embedding_dim __UpperCAmelCase : List[Any] = Timesteps(snake_case , snake_case , 0 ) __UpperCAmelCase : Optional[Any] = TimestepEmbedding(snake_case , snake_case , out_dim=snake_case , act_fn=snake_case ) __UpperCAmelCase : str = nn.Linear(snake_case , snake_case ) if embedding_proj_norm_type is None: __UpperCAmelCase : str = None elif embedding_proj_norm_type == "layer": __UpperCAmelCase : str = nn.LayerNorm(snake_case ) else: raise ValueError(f'unsupported embedding_proj_norm_type: {embedding_proj_norm_type}' ) __UpperCAmelCase : List[Any] = nn.Linear(snake_case , snake_case ) if encoder_hid_proj_type is None: __UpperCAmelCase : Union[str, Any] = None elif encoder_hid_proj_type == "linear": __UpperCAmelCase : Any = nn.Linear(snake_case , snake_case ) else: raise ValueError(f'unsupported encoder_hid_proj_type: {encoder_hid_proj_type}' ) __UpperCAmelCase : Dict = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , snake_case ) ) if added_emb_type == "prd": __UpperCAmelCase : Any = nn.Parameter(torch.zeros(1 , 1 , snake_case ) ) elif added_emb_type is None: __UpperCAmelCase : List[Any] = None else: raise ValueError( f'`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.' ) __UpperCAmelCase : Optional[int] = nn.ModuleList( [ BasicTransformerBlock( snake_case , snake_case , snake_case , dropout=snake_case , activation_fn='''gelu''' , attention_bias=snake_case , ) for d in range(snake_case ) ] ) if norm_in_type == "layer": __UpperCAmelCase : Tuple = nn.LayerNorm(snake_case ) elif norm_in_type is None: __UpperCAmelCase : List[Any] = None else: raise ValueError(f'Unsupported norm_in_type: {norm_in_type}.' 
) __UpperCAmelCase : Dict = nn.LayerNorm(snake_case ) __UpperCAmelCase : Any = nn.Linear(snake_case , snake_case ) __UpperCAmelCase : Any = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 ) causal_attention_mask.triu_(1 ) __UpperCAmelCase : str = causal_attention_mask[None, ...] self.register_buffer('''causal_attention_mask''' , snake_case , persistent=snake_case ) __UpperCAmelCase : Tuple = nn.Parameter(torch.zeros(1 , snake_case ) ) __UpperCAmelCase : List[str] = nn.Parameter(torch.zeros(1 , snake_case ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def lowerCamelCase__ ( self : int ) -> Dict[str, AttentionProcessor]: __UpperCAmelCase : Optional[Any] = {} def fn_recursive_add_processors(snake_case : str , snake_case : torch.nn.Module , snake_case : Dict[str, AttentionProcessor] ): if hasattr(snake_case , '''set_processor''' ): __UpperCAmelCase : Union[str, Any] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'{name}.{sub_name}' , snake_case , snake_case ) return processors for name, module in self.named_children(): fn_recursive_add_processors(snake_case , snake_case , snake_case ) return processors def lowerCamelCase__ ( self : Optional[Any] , snake_case : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Union[str, Any]: __UpperCAmelCase : Union[str, Any] = len(self.attn_processors.keys() ) if isinstance(snake_case , snake_case ) and len(snake_case ) != count: raise ValueError( f'A dict of processors was passed, but the number of processors {len(snake_case )} does not match the' f' number of attention layers: {count}. Please make sure to pass {count} processor classes.' ) def fn_recursive_attn_processor(snake_case : str , snake_case : torch.nn.Module , snake_case : int ): if hasattr(snake_case , '''set_processor''' ): if not isinstance(snake_case , snake_case ): module.set_processor(snake_case ) else: module.set_processor(processor.pop(f'{name}.processor' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'{name}.{sub_name}' , snake_case , snake_case ) for name, module in self.named_children(): fn_recursive_attn_processor(snake_case , snake_case , snake_case ) def lowerCamelCase__ ( self : str ) -> Tuple: self.set_attn_processor(AttnProcessor() ) def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[Any] , snake_case : Union[torch.Tensor, float, int] , snake_case : torch.FloatTensor , snake_case : Optional[torch.FloatTensor] = None , snake_case : Optional[torch.BoolTensor] = None , snake_case : bool = True , ) -> List[Any]: __UpperCAmelCase : Any = hidden_states.shape[0] __UpperCAmelCase : Optional[int] = timestep if not torch.is_tensor(snake_case ): __UpperCAmelCase : str = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(snake_case ) and len(timesteps.shape ) == 0: __UpperCAmelCase : Any = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML __UpperCAmelCase : Optional[int] = timesteps * torch.ones(snake_case , dtype=timesteps.dtype , device=timesteps.device ) __UpperCAmelCase : Tuple = self.time_proj(snake_case ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
__UpperCAmelCase : int = timesteps_projected.to(dtype=self.dtype ) __UpperCAmelCase : Optional[int] = self.time_embedding(snake_case ) if self.embedding_proj_norm is not None: __UpperCAmelCase : Optional[Any] = self.embedding_proj_norm(snake_case ) __UpperCAmelCase : str = self.embedding_proj(snake_case ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: __UpperCAmelCase : Dict = self.encoder_hidden_states_proj(snake_case ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' ) __UpperCAmelCase : Optional[int] = self.proj_in(snake_case ) __UpperCAmelCase : Optional[Any] = self.positional_embedding.to(hidden_states.dtype ) __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : Optional[Any] = 0 if encoder_hidden_states is not None: additional_embeds.append(snake_case ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: __UpperCAmelCase : Optional[int] = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: __UpperCAmelCase : Union[str, Any] = hidden_states[:, None, :] __UpperCAmelCase : Union[str, Any] = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: __UpperCAmelCase : Any = self.prd_embedding.to(hidden_states.dtype ).expand(snake_case , -1 , -1 ) additional_embeds.append(snake_case ) __UpperCAmelCase : Dict = torch.cat( snake_case , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens __UpperCAmelCase : str = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: __UpperCAmelCase : Union[str, Any] = F.pad( snake_case , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) __UpperCAmelCase : Optional[int] = hidden_states + positional_embeddings if attention_mask is not None: __UpperCAmelCase : List[Any] = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0 __UpperCAmelCase : str = F.pad(snake_case , (0, self.additional_embeddings) , value=0.0 ) __UpperCAmelCase : Optional[int] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) __UpperCAmelCase : Optional[int] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: __UpperCAmelCase : str = self.norm_in(snake_case ) for block in self.transformer_blocks: __UpperCAmelCase : Optional[int] = block(snake_case , attention_mask=snake_case ) __UpperCAmelCase : int = self.norm_out(snake_case ) if self.prd_embedding is not None: __UpperCAmelCase : Optional[int] = hidden_states[:, -1] else: __UpperCAmelCase : List[Any] = hidden_states[:, additional_embeddings_len:] __UpperCAmelCase : Dict = self.proj_to_clip_embeddings(snake_case ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=snake_case ) def lowerCamelCase__ ( self : Optional[int] , snake_case : List[str] ) -> str: __UpperCAmelCase : Dict = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
240
0
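The tf-to-hf key renaming in the BigBird-Pegasus converter above is plain ordered substring replacement; a standalone sketch with a trimmed pattern list:

PATTERNS = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]

def rename_key(key, patterns=PATTERNS):
    # Apply each (tf_name, hf_name) substitution in order.
    for tf_name, hf_name in patterns:
        key = key.replace(tf_name, hf_name)
    return key

print(rename_key("encoder/layer_0/kernel"))  # encoder.layers.0.weight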